import numpy as np
import imutils
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QIcon, QColor
from PyQt5.QtWidgets import QListWidgetItem, QPushButton
from flags import *
import cv2
import random
from PIL import Image


class MyItem(QListWidgetItem):
    """Base list item representing one image operation.

    Tunable parameters live in single-underscore attributes (e.g. ``_ksize``);
    get_params/update_params expose and restore them without the prefix.
    Subclasses implement ``__call__(img) -> img`` to apply the operation.
    """

    def __init__(self, name=None, parent=None):
        super(MyItem, self).__init__(name, parent=parent)
        self.setIcon(QIcon('icons/color.png'))
        self.setSizeHint(QSize(60, 60))  # fixed item size in the list widget

    def get_params(self):
        """Return {param_name: value} for every ``_``-prefixed attribute."""
        protected = [v for v in dir(self) if v.startswith('_') and not v.startswith('__')]
        param = {}
        for v in protected:
            param[v.replace('_', '', 1)] = self.__getattribute__(v)
        return param

    def update_params(self, param):
        """Write values from ``param`` back onto matching ``_``-prefixed attributes."""
        for k, v in param.items():
            if '_' + k in dir(self):
                self.__setattr__('_' + k, v)


class GrayingItem(MyItem):
    """Convert the image to grayscale (kept as 3 channels for the pipeline)."""

    def __init__(self, parent=None):
        super(GrayingItem, self).__init__(' 灰度化 ', parent=parent)
        self._mode = BGR2GRAY_COLOR

    def __call__(self, img):
        # NOTE(review): pipeline elsewhere treats images as BGR (EdgeItem,
        # ContourItem use COLOR_BGR2GRAY) — RGB2GRAY here swaps the R/B
        # weights; confirm intended colorspace before changing.
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        return img


class FilterItem(MyItem):
    """Smoothing: mean, Gaussian or median filter with a square kernel."""

    def __init__(self, parent=None):
        super().__init__('平滑处理', parent=parent)
        self._ksize = 3       # kernel side length
        self._kind = MEAN_FILTER
        self._sigmax = 0      # Gaussian sigma (0 lets OpenCV derive it from ksize)

    def __call__(self, img):
        if self._kind == MEAN_FILTER:
            img = cv2.blur(img, (self._ksize, self._ksize))
        elif self._kind == GAUSSIAN_FILTER:
            img = cv2.GaussianBlur(img, (self._ksize, self._ksize), self._sigmax)
        elif self._kind == MEDIAN_FILTER:
            img = cv2.medianBlur(img, self._ksize)
        return img


class MorphItem(MyItem):
    """Morphological operation (erode/dilate/open/close/...) with selectable kernel shape."""

    def __init__(self, parent=None):
        super().__init__(' 形态学 ', parent=parent)
        self._ksize = 3
        self._op = ERODE_MORPH_OP
        self._kshape = RECT_MORPH_SHAPE

    def __call__(self, img):
        op = MORPH_OP[self._op]
        kshape = MORPH_SHAPE[self._kshape]
        kernel = cv2.getStructuringElement(kshape, (self._ksize, self._ksize))
        # BUG FIX: pass the mapped OpenCV op code, not the raw UI flag index
        # (the original passed self._op and left `op` unused).
        img = cv2.morphologyEx(img, op, kernel)
        return img


class GradItem(MyItem):
    """Image gradient via Sobel, Scharr or Laplacian."""

    def __init__(self, parent=None):
        super().__init__('图像梯度', parent=parent)
        self._kind = SOBEL_GRAD
        self._ksize = 3
        self._dx = 1
        self._dy = 0

    def __call__(self, img):
        # Sobel/Scharr need at least one of dx, dy non-zero; flag the item in
        # the UI instead of crashing.
        if self._dx == 0 and self._dy == 0 and self._kind != LAPLACIAN_GRAD:
            self.setBackground(QColor(255, 0, 0))
            self.setText('图像梯度 (无效: dx与dy不同时为0)')
        else:
            self.setBackground(QColor(200, 200, 200))
            self.setText('图像梯度')
            if self._kind == SOBEL_GRAD:
                # BUG FIX: ksize must be a keyword argument — passed
                # positionally it lands on the `dst` parameter of cv2.Sobel.
                img = cv2.Sobel(img, -1, self._dx, self._dy, ksize=self._ksize)
            elif self._kind == SCHARR_GRAD:
                img = cv2.Scharr(img, -1, self._dx, self._dy)
            elif self._kind == LAPLACIAN_GRAD:
                img = cv2.Laplacian(img, -1)
        return img


class ThresholdItem(MyItem):
    """Global threshold on the grayscale image, re-expanded to 3 channels."""

    def __init__(self, parent=None):
        super().__init__('阈值处理', parent=parent)
        self._thresh = 127
        self._maxval = 255
        self._method = BINARY_THRESH_METHOD

    def __call__(self, img):
        method = THRESH_METHOD[self._method]
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        # BUG FIX: the third argument is maxval — the original passed
        # self._thresh twice and never used self._maxval.
        img = cv2.threshold(img, self._thresh, self._maxval, method)[1]
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        return img


class EdgeItem(MyItem):
    """Canny edge detection with two hysteresis thresholds."""

    def __init__(self, parent=None):
        super(EdgeItem, self).__init__('边缘检测', parent=parent)
        self._thresh1 = 20
        self._thresh2 = 100

    def __call__(self, img):
        img = cv2.Canny(img, threshold1=self._thresh1, threshold2=self._thresh2)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        return img


class ContourItem(MyItem):
    """Find contours and draw them as outlines, bounding boxes, min-area
    rectangles or min enclosing circles."""

    def __init__(self, parent=None):
        super(ContourItem, self).__init__('轮廓检测', parent=parent)
        self._mode = TREE_CONTOUR_MODE
        self._method = SIMPLE_CONTOUR_METHOD
        self._bbox = NORMAL_CONTOUR

    def __call__(self, img):
        mode = CONTOUR_MODE[self._mode]
        method = CONTOUR_METHOD[self._method]
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cnts, _ = cv2.findContours(img, mode, method)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        if self._bbox == RECT_CONTOUR:
            # removed leftover debug print of the boxes
            for x, y, w, h in (cv2.boundingRect(cnt) for cnt in cnts):
                img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), thickness=2)
        elif self._bbox == MINRECT_CONTOUR:
            # np.intp replaces np.int0 (alias removed in NumPy 2.0)
            bboxs = [np.intp(cv2.boxPoints(cv2.minAreaRect(cnt))) for cnt in cnts]
            img = cv2.drawContours(img, bboxs, -1, (255, 0, 0), thickness=2)
        elif self._bbox == MINCIRCLE_CONTOUR:
            for (x, y), r in (cv2.minEnclosingCircle(cnt) for cnt in cnts):
                img = cv2.circle(img, (int(x), int(y)), int(r), (255, 0, 0), thickness=2)
        elif self._bbox == NORMAL_CONTOUR:
            img = cv2.drawContours(img, cnts, -1, (255, 0, 0), thickness=2)
        return img


class EqualizeItem(MyItem):
    """Histogram equalization, toggleable per BGR channel."""

    def __init__(self, parent=None):
        super().__init__(' 均衡化 ', parent=parent)
        self._blue = True
        self._green = True
        self._red = True

    def __call__(self, img):
        b, g, r = cv2.split(img)
        if self._blue:
            b = cv2.equalizeHist(b)
        if self._green:
            g = cv2.equalizeHist(g)
        if self._red:
            r = cv2.equalizeHist(r)
        return cv2.merge((b, g, r))


class HoughLineItem(MyItem):
    """Probabilistic Hough line detection, lines drawn in green."""

    def __init__(self, parent=None):
        super(HoughLineItem, self).__init__('直线检测', parent=parent)
        self._rho = 1
        self._theta = np.pi / 180
        self._thresh = 10
        self._min_length = 20
        self._max_gap = 5

    def __call__(self, img):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        lines = cv2.HoughLinesP(img, self._rho, self._theta, self._thresh,
                                minLineLength=self._min_length, maxLineGap=self._max_gap)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        if lines is None:
            return img
        for line in lines:
            for x1, y1, x2, y2 in line:
                img = cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)
        return img


class GammaItem(MyItem):
    """Gamma correction via a 256-entry lookup table."""

    def __init__(self, parent=None):
        super(GammaItem, self).__init__('伽马校正', parent=parent)
        self._gamma = 1

    def __call__(self, img):
        # LUT[i] = round(255 * (i/255) ** gamma)
        table = np.round(np.power(np.arange(256) / 255.0, self._gamma) * 255.0).astype(np.uint8)
        return cv2.LUT(img, table)


# Geometry:
class RotateAnyItem(MyItem):
    """Rotate by an arbitrary angle; canvas grows to keep the whole image."""

    def __init__(self, parent=None):
        super(RotateAnyItem, self).__init__('任意旋转', parent=parent)
        self._angle = 0

    def __call__(self, img):
        return imutils.rotate_bound(img, self._angle)


class RotateLeftItem(MyItem):
    """Rotate 90 degrees counter-clockwise."""

    def __init__(self, parent=None):
        super(RotateLeftItem, self).__init__('向左旋转', parent=parent)

    def __call__(self, img):
        return imutils.rotate_bound(img, -90)


class RotateRightItem(MyItem):
    """Rotate 90 degrees clockwise."""

    def __init__(self, parent=None):
        super(RotateRightItem, self).__init__('向右旋转', parent=parent)

    def __call__(self, img):
        return imutils.rotate_bound(img, 90)


class UpDownItem(MyItem):
    """Flip vertically (around the horizontal axis)."""

    def __init__(self, parent=None):
        super(UpDownItem, self).__init__('上下翻折', parent=parent)

    def __call__(self, img):
        return cv2.flip(img, 0)


# Lighting:
class ExposureItem(MyItem):
    """Exposure: scale all pixel values by alpha (blend with a black image)."""

    def __init__(self, parent=None):
        super(ExposureItem, self).__init__('曝光', parent=parent)
        self._alpha = 1

    def __call__(self, img):
        blank = np.zeros(img.shape, img.dtype)
        return cv2.addWeighted(img, self._alpha, blank, 1 - self._alpha, 0)


class LightItem(MyItem):
    """Brightness: add beta to every pixel (saturating)."""

    def __init__(self, parent=None):
        super(LightItem, self).__init__('亮度调节', parent=parent)
        self._beta = 0

    def __call__(self, img):
        blank = np.zeros(img.shape, img.dtype)
        return cv2.addWeighted(img, 1, blank, 0, self._beta)


class ContrastItem(MyItem):
    """Contrast: scale pixel distance from mid-grey (127) by alpha."""

    def __init__(self, parent=None):
        super(ContrastItem, self).__init__('对比度调节', parent=parent)
        self._alpha = 1

    def __call__(self, img):
        out = self._alpha * (img.astype(np.float64) - 127) + 127
        return np.uint8(np.clip(out, 0, 255))


# Color:
class ColorTemperatureItem(MyItem):
    """Color temperature: lower channels 0 and 1, raise channel 2 by n/2."""

    def __init__(self, parent=None):
        super(ColorTemperatureItem, self).__init__('色温调节', parent=parent)
        self._n = 50

    def __call__(self, img):
        level = self._n / 2
        # Vectorized equivalent of the original per-pixel loop: same per-channel
        # offsets, same clamp to [0, 255], same truncation back to uint8.
        adjusted = img.astype(np.float64)
        adjusted[:, :, 0] -= level
        adjusted[:, :, 1] -= level
        adjusted[:, :, 2] += level
        return np.clip(adjusted, 0, 255).astype(np.uint8)


class HueItem(MyItem):
    """Hue shift: raise channels 0 and 2, lower channel 1 by n/2."""

    def __init__(self, parent=None):
        super(HueItem, self).__init__('色调调节', parent=parent)
        self._n = 50

    def __call__(self, img):
        level = self._n / 2
        # Vectorized equivalent of the original per-pixel loop.
        adjusted = img.astype(np.float64)
        adjusted[:, :, 0] += level
        adjusted[:, :, 1] -= level
        adjusted[:, :, 2] += level
        return np.clip(adjusted, 0, 255).astype(np.uint8)


class SaturationItem(MyItem):
    """Saturation adjustment in HSV space; lower saturation approaches grayscale."""

    def __init__(self, parent=None):
        super(SaturationItem, self).__init__('饱和度调节', parent=parent)
        self._x = 0

    def __call__(self, img):
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv)
        s1 = np.clip(cv2.add(s, self._x), 0, 255)  # saturating add, then clamp
        img = np.uint8(cv2.merge((h, s1, v)))
        return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)


def _adjust_hsl_band(img, hue_ranges, s_param, v_param):
    """Shared worker for the HSL*Item classes: boost saturation/lightness of
    pixels whose HSV hue falls in one of ``hue_ranges``.

    A pixel is touched only if it is reasonably saturated and bright
    (43 < s < 255, 46 < v < 255) and its BGR channels differ enough
    (tag > 1600) to count as "colored". ``img`` is modified in place and
    returned; the loop reproduces the original per-pixel arithmetic exactly
    (including uint8 wrap-around in ``b - v`` etc.).
    """
    img_hsl = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    height, width = img.shape[:2]
    value1 = v_param - 1.0  # lightness strength
    value2 = s_param - 1.0  # saturation strength
    for m in range(height):
        for w in range(width):
            h, s, v = img_hsl[m, w]
            b, g, r = img[m, w]
            tag = abs((int(b) - int(g)) * (int(g) - int(r)))
            if 43 < s < 255 and 46 < v < 255 and tag > 1600:
                if any(lo < h < hi for lo, hi in hue_ranges):
                    b = (b + (b - v) * value2)
                    g = (g + (g - v) * value2)
                    r = (r + (r - v) * value2)
                    b = b + value1 * (255 - b)
                    g = g + value1 * (255 - g)
                    r = r + value1 * (255 - r)
                    img[m, w] = [b, g, r]
    return img


# HSL (per-hue-band adjustment):
class HSLRedItem(MyItem):
    """Adjust saturation/lightness of red pixels (hue 0-10 and 156-180)."""

    def __init__(self, parent=None):
        super(HSLRedItem, self).__init__('红色', parent=parent)
        self._red_param_s = 1.0
        self._red_param_v = 1.0

    def __call__(self, img):
        return _adjust_hsl_band(img, [(0, 10), (156, 180)],
                                self._red_param_s, self._red_param_v)


class HSLOrangeItem(MyItem):
    """Adjust saturation/lightness of orange pixels (hue 11-25)."""

    def __init__(self, parent=None):
        super(HSLOrangeItem, self).__init__('橙色', parent=parent)
        self._orange_param_s = 1.0
        self._orange_param_v = 1.0

    def __call__(self, img):
        return _adjust_hsl_band(img, [(11, 25)],
                                self._orange_param_s, self._orange_param_v)


class HSLYellowItem(MyItem):
    """Adjust saturation/lightness of yellow pixels (hue 26-34)."""

    def __init__(self, parent=None):
        super(HSLYellowItem, self).__init__('黄色', parent=parent)
        self._yellow_param_s = 1.0
        self._yellow_param_v = 1.0

    def __call__(self, img):
        return _adjust_hsl_band(img, [(26, 34)],
                                self._yellow_param_s, self._yellow_param_v)


class HSLGreenItem(MyItem):
    """Adjust saturation/lightness of green pixels (hue 35-77)."""

    def __init__(self, parent=None):
        super(HSLGreenItem, self).__init__('绿色', parent=parent)
        self._green_param_s = 1.0
        self._green_param_v = 1.0

    def __call__(self, img):
        return _adjust_hsl_band(img, [(35, 77)],
                                self._green_param_s, self._green_param_v)


class HSLCyanItem(MyItem):
    """Adjust saturation/lightness of cyan pixels (hue 78-99)."""

    def __init__(self, parent=None):
        super(HSLCyanItem, self).__init__('青色', parent=parent)
        self._cyan_param_s = 1.0
        self._cyan_param_v = 1.0

    def __call__(self, img):
        return _adjust_hsl_band(img, [(78, 99)],
                                self._cyan_param_s, self._cyan_param_v)


class HSLBlueItem(MyItem):
    """Adjust saturation/lightness of blue pixels (hue 100-124)."""

    def __init__(self, parent=None):
        super(HSLBlueItem, self).__init__('蓝色', parent=parent)
        self._blue_param_s = 1.0
        self._blue_param_v = 1.0

    def __call__(self, img):
        return _adjust_hsl_band(img, [(100, 124)],
                                self._blue_param_s, self._blue_param_v)


class HSLPurpleItem(MyItem):
    """Adjust saturation/lightness of purple pixels (hue 125-155)."""

    def __init__(self, parent=None):
        super(HSLPurpleItem, self).__init__('紫色', parent=parent)
        self._purple_param_s = 1.0
        self._purple_param_v = 1.0

    def __call__(self, img):
        return _adjust_hsl_band(img, [(125, 155)],
                                self._purple_param_s, self._purple_param_v)


class PixelateItem(MyItem):
    """Pixelate by shrinking to size x size then nearest-neighbor upscaling."""

    def __init__(self, parent=None):
        super(PixelateItem, self).__init__('像素化', parent=parent)
        self._size = 100

    def __call__(self, img):
        height, width = img.shape[:2]
        temp = cv2.resize(img, (self._size, self._size), interpolation=cv2.INTER_LINEAR)
        return cv2.resize(temp, (width, height), interpolation=cv2.INTER_NEAREST)


class BlurItem(MyItem):
    """Box blur with a square kernel of side ``_size``."""

    def __init__(self, parent=None):
        super(BlurItem, self).__init__('模糊', parent=parent)
        self._size = 5

    def __call__(self, img):
        return cv2.blur(img, (self._size, self._size))


# ImageNet mean (BGR) used by the fast-neural-style Torch models: it is
# subtracted when building the blob and added back to the network output.
_STYLE_MEAN = (103.939, 116.779, 123.680)


def _neural_style_transfer(img, model_path):
    """Shared worker for the *StyleTransformItem classes.

    Runs the Torch fast-neural-style model at ``model_path`` over ``img`` and
    returns the stylized uint8 BGR image.  Replaces the original lossy
    tmp.jpg write/read round-trip with an in-memory clip + cast.
    """
    net = cv2.dnn.readNetFromTorch(model_path)
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    row, column = img.shape[:2]
    blob = cv2.dnn.blobFromImage(img, 1.0, (column, row), _STYLE_MEAN,
                                 swapRB=False, crop=False)
    net.setInput(blob)
    out = net.forward()
    out = out.reshape(3, out.shape[2], out.shape[3])
    out[0] += _STYLE_MEAN[0]
    out[1] += _STYLE_MEAN[1]
    out[2] += _STYLE_MEAN[2]
    out = out.transpose(1, 2, 0)
    return np.clip(out, 0, 255).astype(np.uint8)


class CandyStyleTransformItem(MyItem):
    """Neural style transfer: 'candy' model."""

    def __init__(self, parent=None):
        super(CandyStyleTransformItem, self).__init__('candy', parent=parent)

    def __call__(self, img):
        return _neural_style_transfer(img, "./style/candy.t7")


class CompositionStyleTransformItem(MyItem):
    """Neural style transfer: 'composition_vii' model."""

    def __init__(self, parent=None):
        super(CompositionStyleTransformItem, self).__init__('composition', parent=parent)

    def __call__(self, img):
        return _neural_style_transfer(img, "./style/composition_vii.t7")


class FeathersStyleTransformItem(MyItem):
    """Neural style transfer: 'feathers' model."""

    def __init__(self, parent=None):
        super(FeathersStyleTransformItem, self).__init__('feathers', parent=parent)

    def __call__(self, img):
        return _neural_style_transfer(img, "./style/feathers.t7")


class MuseStyleTransformItem(MyItem):
    """Neural style transfer: 'la_muse' model."""

    def __init__(self, parent=None):
        super(MuseStyleTransformItem, self).__init__('la_muse', parent=parent)

    def __call__(self, img):
        return _neural_style_transfer(img, "./style/la_muse.t7")


class MosaicStyleTransformItem(MyItem):
    """Neural style transfer: 'mosaic' model."""

    def __init__(self, parent=None):
        super(MosaicStyleTransformItem, self).__init__('mosaic', parent=parent)

    def __call__(self, img):
        return _neural_style_transfer(img, "./style/mosaic.t7")


class StarryNightStyleTransformItem(MyItem):
    """Neural style transfer: 'starry_night' model."""

    def __init__(self, parent=None):
        super(StarryNightStyleTransformItem, self).__init__('starry_night', parent=parent)

    def __call__(self, img):
        return _neural_style_transfer(img, "./style/starry_night.t7")


class ScreamStyleTransformItem(MyItem):
    """Neural style transfer: 'the_scream' model."""

    def __init__(self, parent=None):
        super(ScreamStyleTransformItem, self).__init__('the_scream', parent=parent)

    def __call__(self, img):
        return _neural_style_transfer(img, "./style/the_scream.t7")


class WaveStyleTransformItem(MyItem):
    """Neural style transfer: 'the_wave' model."""

    def __init__(self, parent=None):
        super(WaveStyleTransformItem, self).__init__('the_wave', parent=parent)

    def __call__(self, img):
        return _neural_style_transfer(img, "./style/the_wave.t7")


class UdnieStyleTransformItem(MyItem):
    """Neural style transfer: 'udnie' model."""

    def __init__(self, parent=None):
        super(UdnieStyleTransformItem, self).__init__('udnie', parent=parent)

    def __call__(self, img):
        return _neural_style_transfer(img, "./style/udnie.t7")


class ClarityItem(MyItem):
    """Sharpen with a fixed 3x3 Laplacian-style kernel."""

    def __init__(self, parent=None):
        super(ClarityItem, self).__init__('锐化', parent=parent)

    def __call__(self, img):
        kernel = np.array([[0, -1, 0],
                           [-1, 5, -1],
                           [0, -1, 0]], np.float32)
        return cv2.filter2D(img, -1, kernel=kernel)


class NoiseItem(MyItem):
    """Add salt-and-pepper noise to 2% of the pixels."""

    def __init__(self, parent=None):
        super(NoiseItem, self).__init__('噪点', parent=parent)

    def __call__(self, img):
        num = int(0.02 * img.shape[0] * img.shape[1])  # number of noise pixels
        # (removed a stray no-op random.randint(...) left over from debugging)
        img2 = img.copy()
        for _ in range(num):
            # randint bounds are inclusive, hence the -1
            x = random.randint(0, img2.shape[0] - 1)
            y = random.randint(0, img2.shape[1] - 1)
            if random.randint(0, 1) == 0:  # 50/50 salt vs. pepper
                img2[x, y] = (255, 255, 255)  # white
            else:
                img2[x, y] = (0, 0, 0)  # black
        return img2


class ImageCutItem(MyItem):
    """Crop to the rectangle [x1:x2] x [y1:y2]."""

    def __init__(self, parent=None):
        super(ImageCutItem, self).__init__('裁剪', parent=parent)
        self._x1 = 0
        self._x2 = 300
        self._y1 = 200
        self._y2 = 600

    def __call__(self, img):
        return img[self._y1:self._y2, self._x1:self._x2]