master
ZhuYan 4 years ago
parent abad2ef9d2
commit 1dcd26a492

@ -1,3 +0,0 @@
from django.contrib import admin
# Register your models here.

@ -1,6 +0,0 @@
from django.apps import AppConfig
class ActiondriveConfig(AppConfig):
    """Django application configuration for the ``actionDrive`` app."""

    # Application label used by Django's app registry.
    name = 'actionDrive'
    # Use 64-bit auto primary keys by default (Django 3.2+ convention).
    default_auto_field = 'django.db.models.BigAutoField'

@ -1,3 +0,0 @@
from django.db import models
# Create your models here.

@ -1,3 +0,0 @@
from django.test import TestCase
# Create your tests here.

@ -1,3 +0,0 @@
from django.shortcuts import render
# Create your views here.

@ -1,3 +0,0 @@
from django.contrib import admin
# Register your models here.

@ -1,6 +0,0 @@
from django.apps import AppConfig
class AgetransferConfig(AppConfig):
    """Django application configuration for the ``ageTransfer`` app."""

    # Application label used by Django's app registry.
    name = 'ageTransfer'
    # Use 64-bit auto primary keys by default (Django 3.2+ convention).
    default_auto_field = 'django.db.models.BigAutoField'

@ -1,3 +0,0 @@
from django.db import models
# Create your models here.

@ -1,3 +0,0 @@
from django.test import TestCase
# Create your tests here.

@ -1,3 +0,0 @@
from django.shortcuts import render
# Create your views here.

Binary file not shown.

After

Width:  |  Height:  |  Size: 85 KiB

@ -1,16 +1,18 @@
import cv2
import numpy as np
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.http import HttpResponse
from removebg import RemoveBg
from django.http import JsonResponse
from django.views.decorators.http import require_POST,require_GET
from matplotlib import pyplot as plt
import sys
sys.path.append("..")
import imutils
from PIL import Image
sys.path.append("../")
import urllib.request
import base64
import cv2
import numpy as np
import numba as nb
from pyzbar.pyzbar import decode
# base64转cv
def base64tocv(imagecode):
strList = str(imagecode).split(',')
@ -23,14 +25,14 @@ def base64tocv(imagecode):
# image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return image
# cv image -> base64
def cvtobase64(img):
    """Encode an OpenCV image as base64 JPEG data.

    Parameters
    ----------
    img : numpy.ndarray
        BGR image as produced by OpenCV.

    Returns
    -------
    bytes
        Base64-encoded JPEG bytes (call ``.decode()`` before putting
        the value into a JSON response).
    """
    # BUG FIX: ndarray.tostring() was deprecated and removed in modern
    # NumPy; tobytes() is the supported equivalent with identical output.
    jpeg_bytes = cv2.imencode('.jpg', img)[1].tobytes()
    return base64.b64encode(jpeg_bytes)
# #cv 转 base64
# def cvtobase64(img):
# #
# # image = cv2.imencode('.jpg', img)[1]
# # res_bs64 = str(base64.b64encode(image))[2:-1]
# res_b = cv2.imencode('.jpg', img)[1].tostring()
# res_bs64 = base64.b64encode(res_b)
# return res_bs64
#InMemoryUploadedFile 转 ndarray
def getNdarraybyInMemoryUploadedFile(uploaded_file):
def rotatePicture(request):
    """Rotate the posted base64 image 90 degrees left or right.

    Reads ``files`` (base64 image) and ``direction`` ('left'/'right')
    from POST. The output canvas is expanded to the rotated bounding box
    (white fill) so no pixels are cropped; the result is returned as a
    base64 JSON payload.
    """
    image = request.POST.get('files')
    cv_image = base64tocv(image)
    rows, cols, depth = cv_image.shape
    (cX, cY) = (cols // 2, rows // 2)
    direction = request.POST.get('direction')
    # +90 rotates counter-clockwise ("left"), -90 clockwise ("right").
    if direction == 'left':
        angle = 90
    elif direction == 'right':
        angle = -90
    else:
        angle = None
    if angle is not None:
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
        # Size of the axis-aligned bounding box of the rotated image.
        cos = np.abs(M[0, 0])
        sin = np.abs(M[0, 1])
        nW = int((rows * sin) + (cols * cos))
        nH = int((rows * cos) + (cols * sin))
        # Shift so the rotated image is centred in the new canvas.
        M[0, 2] += (nW / 2) - cX
        M[1, 2] += (nH / 2) - cY
        # BUG FIX: the original called warpAffine twice (once before and
        # once after adjusting M), rotating the already-rotated image a
        # second time. A single warp with the adjusted matrix is correct.
        cv_image = cv2.warpAffine(cv_image, M, (nW, nH), borderValue=(255, 255, 255))
    rotate_image = getbase64byndarray(cv_image)
    # BUG FIX: the original had a duplicated, unreachable return statement.
    return JsonResponse(data={"image": rotate_image}, json_dumps_params={'ensure_ascii': False})
# background removal (keying)
@require_POST
def keying(request):
    """Remove the background of the posted image via the remove.bg API."""
    # SECURITY: the remove.bg API key is hard-coded in source; move it to
    # a Django setting / environment variable before deploying.
    rmbg = RemoveBg("XHfvSY3phCVna4NZj8Tr8Wnz", "error.log")
    image = request.POST.get('files')
    # The library writes the API result to "output.jpg" in the CWD.
    rmbg.remove_background_from_base64_img(image, new_file_name="output.jpg", bg_color=(0, 0, 0))
    result = cv2.imread("output.jpg")
    # BUG FIX: cv2.imread returns None when the file is missing/invalid
    # (e.g. the API call failed); the committed server logs show the
    # resulting "!image.empty()" crash in imencode. Fail explicitly.
    if result is None:
        return JsonResponse(data={"error": "background removal failed"}, status=500)
    result = getbase64byndarray(result)
    return JsonResponse(data={"image": result}, json_dumps_params={'ensure_ascii': False})
# image smoothing
@require_POST
def smooth(request):
    """Apply a 3x3 median-style smoothing filter to the posted image.

    For each interior pixel, the median of its 3x3 neighbourhood (judged
    on the first colour channel) is located and that neighbour's full
    colour triple is copied in, which keeps the channels consistent.
    Border pixels are left untouched. Returns the result as base64 JSON.
    """
    base_image = request.POST.get('files')
    cv_image = base64tocv(base_image)
    imgarray = np.array(cv_image)
    height, width = imgarray.shape[0], imgarray.shape[1]
    edge = int((3 - 1) / 2)  # half-width of the 3x3 kernel -> 1
    if height - 1 - edge <= edge or width - 1 - edge <= edge:
        # Image smaller than the kernel: nothing sensible to do.
        print("the kernel is too long")
        return None
    for i in range(height):
        for j in range(width):
            # BUG FIX: the original right-border test compared j against
            # *height* instead of *width*, which is wrong for non-square
            # images (it also performed a pointless self-assignment on
            # border pixels; they are simply skipped here).
            if i <= edge - 1 or i >= height - 1 - edge or j <= edge - 1 or j >= width - 1 - edge:
                continue
            num = []
            for m in range(i - edge, i + edge + 1):
                for n in range(j - edge, j + edge + 1):
                    # The median is judged on the first channel only.
                    num.append((imgarray[m][n])[0])
            temp = np.median(num)
            idex_tem = num.index(temp)  # position of the median in the window
            # Map the flat 3x3 window index back to image coordinates.
            l1 = int(idex_tem / 3) - edge + i
            l2 = (idex_tem % 3) - edge + j
            imgarray[i][j] = imgarray[l1][l2]
    result = getbase64byndarray(imgarray)
    return JsonResponse(data={"image": result}, json_dumps_params={'ensure_ascii': False})
# exposure adjustment
def exposure(request):
    """Histogram-equalise each BGR channel of the posted image."""
    encoded = request.POST.get('files')
    image = base64tocv(encoded)
    equalized = np.zeros(image.shape, image.dtype)
    # Equalise every colour channel independently.
    for channel in range(3):
        equalized[:, :, channel] = cv2.equalizeHist(image[:, :, channel])
    output = getbase64byndarray(equalized)
    return JsonResponse(data={"image": output}, json_dumps_params={'ensure_ascii': False})
# sharpening
def sharpen(request):
    """Sharpen the posted image with a 3x3 Laplacian-style kernel."""
    encoded = request.POST.get('files')
    image = base64tocv(encoded)
    # Classic sharpening kernel: centre weight 5, 4-neighbours -1.
    kernel = np.array([[0, -1, 0],
                       [-1, 5, -1],
                       [0, -1, 0]], dtype=np.float32)
    filtered = cv2.filter2D(image, cv2.CV_32F, kernel)
    # Convert back to 8-bit (absolute value + saturation) for encoding.
    result = cv2.convertScaleAbs(filtered)
    output = getbase64byndarray(result)
    return JsonResponse(data={"image": output}, json_dumps_params={'ensure_ascii': False})
# barcode detection
def barCode(request):
    """Locate a barcode in the posted image and draw its bounding box.

    Classic gradient/morphology pipeline: Scharr gradients, blur +
    threshold, morphological close, then the largest contour is assumed
    to be the barcode. Returns the annotated image as base64 JSON.
    """
    encoded = request.POST.get('files')
    image = base64tocv(encoded)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # BUG FIX: removed all cv2.imshow()/cv2.waitKey(0) debug calls — they
    # block the request indefinitely and fail on a headless server.
    # Scharr gradient magnitude in x and y (cv2.cv only exists in OpenCV 2).
    ddepth = cv2.cv.CV_32F if imutils.is_cv2() else cv2.CV_32F
    gradX = cv2.Sobel(gray, ddepth=ddepth, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=ddepth, dx=0, dy=1, ksize=-1)
    # Subtract the y-gradient from the x-gradient to emphasise the
    # vertical bars of the barcode.
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)
    # Blur and threshold to merge the bars into one solid region.
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 180, 255, cv2.THRESH_BINARY)
    # Close the gaps between bars with a wide rectangular kernel.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 10))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    # A series of dilations/erosions removes small speckles.
    closed = cv2.dilate(closed, None, iterations=4)
    closed = cv2.erode(closed, None, iterations=4)
    # Keep only the largest contour — assumed to be the barcode region.
    cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # BUG FIX: the original indexed sorted(cnts, ...)[0] and raised
    # IndexError when no contour was found.
    if not cnts:
        return JsonResponse(data={"error": "no barcode found"}, status=404)
    c = max(cnts, key=cv2.contourArea)
    # Rotated bounding box of the barcode region.
    rect = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(rect) if imutils.is_cv2() else cv2.boxPoints(rect)
    box = np.int0(box)
    cv2.drawContours(image, [box], -1, (0, 255, 0), 3)
    result = getbase64byndarray(image)
    return JsonResponse(data={"image": result}, json_dumps_params={'ensure_ascii': False})
# noise removal
def removeNoise(request):
    """Denoise the posted colour image using non-local means filtering."""
    encoded = request.POST.get('files')
    cv_image = base64tocv(encoded)
    # Arguments: src, dst, h (luma strength), hColor, templateWindowSize,
    # searchWindowSize.
    denoised = cv2.fastNlMeansDenoisingColored(cv_image, None, 10, 5, 7, 21)
    result = getbase64byndarray(denoised)
    return JsonResponse(data={"image": result}, json_dumps_params={'ensure_ascii': False})

@ -0,0 +1,179 @@
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 131, in keying
result=cvtobase64(image)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 32, in cvtobase64
res_b = cv2.imencode('.jpg', img)[1].tostring()
cv2.error: OpenCV(4.6.0) D:\a\opencv-python\opencv-python\opencv\modules\imgcodecs\src\loadsave.cpp:976: error: (-215:Assertion failed) !image.empty() in function 'cv::imencode'
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 131, in keying
result=cvtobase64(image)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 32, in cvtobase64
res_b = cv2.imencode('.jpg', img)[1].tostring()
cv2.error: OpenCV(4.6.0) D:\a\opencv-python\opencv-python\opencv\modules\imgcodecs\src\loadsave.cpp:976: error: (-215:Assertion failed) !image.empty() in function 'cv::imencode'
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 132, in keying
return JsonResponse(data={"image": result}, json_dumps_params={'ensure_ascii': False})
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\http\response.py", line 603, in __init__
data = json.dumps(data, cls=encoder, **json_dumps_params)
File "C:\Users\ASUS\anaconda3\lib\json\__init__.py", line 234, in dumps
return cls(
File "C:\Users\ASUS\anaconda3\lib\json\encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "C:\Users\ASUS\anaconda3\lib\json\encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\serializers\json.py", line 105, in default
return super().default(o)
File "C:\Users\ASUS\anaconda3\lib\json\encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type bytes is not JSON serializable
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 129, in keying
rmbg.remove_background_from_base64_img(image,bg_color=(0,0,0))
File "C:\Users\ASUS\anaconda3\lib\site-packages\removebg\removebg.py", line 72, in remove_background_from_base64_img
response.raise_for_status()
File "C:\Users\ASUS\anaconda3\lib\site-packages\requests\models.py", line 943, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://api.remove.bg/v1.0/removebg
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 129, in keying
rmbg.remove_background_from_base64_img(image,bg_color=(0,0,0))
File "C:\Users\ASUS\anaconda3\lib\site-packages\removebg\removebg.py", line 72, in remove_background_from_base64_img
response.raise_for_status()
File "C:\Users\ASUS\anaconda3\lib\site-packages\requests\models.py", line 943, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://api.remove.bg/v1.0/removebg
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 128, in keying
result=getbase64byndarray(image)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 42, in getbase64byndarray
retval, buffer = cv2.imencode('.jpg', pic_img)
cv2.error: OpenCV(4.6.0) :-1: error: (-5:Bad argument) in function 'imencode'
> Overload resolution failed:
> - img is not a numpy array, neither a scalar
> - Expected Ptr<cv::UMat> for argument 'img'
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 128, in keying
result=getbase64byndarray(image)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 42, in getbase64byndarray
retval, buffer = cv2.imencode('.jpg', pic_img)
cv2.error: OpenCV(4.6.0) :-1: error: (-5:Bad argument) in function 'imencode'
> Overload resolution failed:
> - img is not a numpy array, neither a scalar
> - Expected Ptr<cv::UMat> for argument 'img'
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 129, in keying
return JsonResponse(data={"image": result}, json_dumps_params={'ensure_ascii': False})
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\http\response.py", line 603, in __init__
data = json.dumps(data, cls=encoder, **json_dumps_params)
File "C:\Users\ASUS\anaconda3\lib\json\__init__.py", line 234, in dumps
return cls(
File "C:\Users\ASUS\anaconda3\lib\json\encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "C:\Users\ASUS\anaconda3\lib\json\encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\serializers\json.py", line 105, in default
return super().default(o)
File "C:\Users\ASUS\anaconda3\lib\json\encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type ndarray is not JSON serializable
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 127, in keying
rmbg.remove_background_from_base64_img(image,new_file_name="output.jpg",bg_color=(0,0,0))
File "C:\Users\ASUS\anaconda3\lib\site-packages\removebg\removebg.py", line 72, in remove_background_from_base64_img
response.raise_for_status()
File "C:\Users\ASUS\anaconda3\lib\site-packages\requests\models.py", line 943, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://api.remove.bg/v1.0/removebg
ERROR:django.request:Internal Server Error: /keying/
Traceback (most recent call last):
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\ASUS\anaconda3\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "F:\imageprocess\imageProcess\basicFunction\views.py", line 127, in keying
rmbg.remove_background_from_base64_img(image,new_file_name="output.jpg",bg_color=(0,0,0))
File "C:\Users\ASUS\anaconda3\lib\site-packages\removebg\removebg.py", line 72, in remove_background_from_base64_img
response.raise_for_status()
File "C:\Users\ASUS\anaconda3\lib\site-packages\requests\models.py", line 943, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://api.remove.bg/v1.0/removebg
INFO:django.utils.autoreload:F:\imageprocess\imageProcess\basicFunction\views.py changed, reloading.

@ -3,13 +3,21 @@ from django.contrib import admin
from django.urls import path
from django.conf.urls import url
import sys
sys.path.append("..")
sys.path.append("../")
from basicFunction import views as basic_function_view
from styleTransfer import views as style_transfer_view
# URL routing: every image-processing endpoint maps to a view function.
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Basic image operations (basicFunction app).
    path('show/', basic_function_view.getPicture),
    path('resize/', basic_function_view.resizePicture),
    path('mirror/', basic_function_view.mirrorPicture),
    path('rotate/', basic_function_view.rotatePicture),
    path('keying/', basic_function_view.keying),
    # Neural style transfer (styleTransfer app).
    path('styleTransfer/', style_transfer_view.style_transfer),
    path('smooth/', basic_function_view.smooth),
    path('exposure/', basic_function_view.exposure),
    path('sharpen/', basic_function_view.sharpen),
    path('barCode/', basic_function_view.barCode),
    path('removeNoise/', basic_function_view.removeNoise),
]

Binary file not shown.

After

Width:  |  Height:  |  Size: 111 KiB

@ -0,0 +1,36 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use this file
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
"""
class Constant:
    """Ascend ACL constants used by the inference pipeline."""

    # ACL success return code.
    ACL_ERROR_NONE = 0

    # Memory-allocation policies.
    ACL_MEM_MALLOC_HUGE_FIRST = 0
    ACL_MEM_MALLOC_HUGE_ONLY = 1
    ACL_MEM_MALLOC_NORMAL_ONLY = 2

    # Memory-copy directions.
    ACL_MEMCPY_HOST_TO_HOST = 0
    ACL_MEMCPY_HOST_TO_DEVICE = 1
    ACL_MEMCPY_DEVICE_TO_HOST = 2
    ACL_MEMCPY_DEVICE_TO_DEVICE = 3

    # Accepted image file extensions.
    IMG_EXT = ['.jpg', '.JPG', '.png', '.PNG', '.bmp', '.BMP', '.jpeg', '.JPEG']

    # ACL data-type identifier for 32-bit floats.
    NPY_FLOAT32 = 11

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

@ -0,0 +1,135 @@
import numpy as np
import mindspore
# import moxing as mox
from mindspore import numpy
from PIL import Image
from mindspore import ops
import os
import cv2 as cv
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_version
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import Model, context, nn, Tensor, Parameter, load_checkpoint
def save_img(i, optimizer, output_path):
    """Dump the image being optimised at iteration *i* into *output_path*.

    Assumes the optimiser's first parameter holds the image tensor in
    (1, C, H, W) layout with the VGG channel means subtracted — TODO
    confirm against the training setup in run.py.
    """
    # BUG FIX: os.mkdir fails when an intermediate directory is missing;
    # makedirs(..., exist_ok=True) also removes the existence race.
    os.makedirs(output_path, exist_ok=True)
    final_img = optimizer.parameters[0].asnumpy()
    final_img = final_img.squeeze(axis=0)       # drop the batch dimension
    final_img = np.moveaxis(final_img, 0, 2)    # CHW -> HWC
    dump_img = np.copy(final_img)
    # Add the ImageNet channel means back before writing to disk.
    dump_img += np.array([123.675, 116.28, 103.53]).reshape((1, 1, 3))
    dump_img = np.clip(dump_img, 0, 255).astype('uint8')
    # e.g. ./output_path/.../iter_1.jpg
    img_path = output_path + "/" + "iter_" + str(i) + ".jpg"
    # Reverse channels (RGB -> BGR) because cv.imwrite expects BGR.
    cv.imwrite(img_path, dump_img[:, :, ::-1])
# mox.file.copy_parallel(img_path, args.train_url+img_path[12:])
def create_dataset(img):
    """Wrap *img* in a MindSpore NumpySlicesDataset with column 'data'."""
    return ds.NumpySlicesDataset(data=img, column_names=['data'])
def gram_matrix(x, should_normalize=True):
    """Compute per-sample Gram matrices of a batch of feature maps.

    *x* has shape (b, ch, h, w); the result has shape (b, ch, ch),
    optionally normalised by the number of elements per channel map.
    """
    b, ch, h, w = x.shape
    # Flatten each channel's spatial map into one row: (b, ch, h*w).
    flat = x.view(b, ch, h * w)
    transpose = ops.Transpose()
    batmatmul = ops.BatchMatMul(transpose_a=False)
    # Gram = F @ F^T, the channel-correlation matrix of the features.
    gram = batmatmul(flat, transpose(flat, (0, 2, 1)))
    if should_normalize:
        gram /= ch * h * w
    return gram
def load_image(img_path, target_shape=None):
    """Load and preprocess an image for VGG; returns a (1, 3, H, W) array.

    target_shape: int -> new height, width scaled to keep aspect ratio;
    tuple -> exact (height, width); None -> keep the original size.
    Raises Exception if *img_path* does not exist.
    """
    if not os.path.exists(img_path):
        raise Exception(f'Path not found: {img_path}')
    img = cv.imread(img_path)[:, :, ::-1]  # convert BGR to RGB when reading
    if target_shape is not None:
        if isinstance(target_shape, int) and target_shape != -1:
            current_height, current_width = img.shape[:2]
            new_height = target_shape
            new_width = int(current_width * (new_height / current_height))
            img = cv.resize(img, (new_width, new_height), interpolation=cv.INTER_CUBIC)
        else:
            img = cv.resize(img, (target_shape[1], target_shape[0]), interpolation=cv.INTER_CUBIC)
    img = img.astype(np.float32)
    to_tensor = py_vision.ToTensor()  # HWC -> CHW, scales pixels to [0, 1]
    # Subtract the ImageNet channel means (std of 1 leaves the scale alone).
    normalize = c_version.Normalize(mean=[123.675, 116.28, 103.53], std=[1, 1, 1])
    img = normalize(img)  # ndarray, e.g. (400, 533, 3)
    # BUG FIX: the original multiplied by 225. ToTensor divides by 255,
    # so 255 is required to restore the pixel scale — this matches the
    # commented torchvision reference (transforms.Lambda(lambda x: x.mul(255))).
    img = to_tensor(img) * 255  # ndarray (3, H, W)
    img = np.expand_dims(img, axis=0)
    return img
class Optim_Loss(nn.Cell):
    """Loss cell for iterative neural style transfer.

    Combines content loss (MSE on the conv4_2 feature map), style loss
    (summed MSE on Gram matrices of the other five maps) and a
    total-variation regulariser over the image parameter being optimised.
    """

    def __init__(self, net, target_maps):
        super(Optim_Loss, self).__init__()
        self.net = net  # feature extractor (VGG19)
        # target_maps = [content feature map, style Gram list, image Parameter];
        # the image Parameter itself is dropped from the stored targets.
        self.target_maps = target_maps[:-1]
        # Loss weights: [content, style, total variation].
        self.weight = [100000.0, 30000.0, 1.0]
        self.get_style_loss = nn.MSELoss(reduction='sum')
        self.get_content_loss = nn.MSELoss(reduction='mean')
        self.cast = ops.Cast()  # cast to mindspore.float32 before the net
        self.ct = target_maps[2]  # Parameter holding the image being optimised

    def construct(self):
        optimize_img = self.ct
        # Six feature maps (conv1..conv4, conv4_2, conv5); index 4 is conv4_2.
        current_maps = self.net(self.cast(optimize_img, mindspore.float32))
        current_content_maps = current_maps[4].squeeze(axis=0)  # content features (conv4_2)
        for i in range(len(current_maps)):  # indices 0..5 -> 1, 2, 3, 4, 4_2, 5
            if i != 4:
                # Style is compared via Gram matrices; the content map is skipped.
                current_maps[i] = gram_matrix(current_maps[i])
        target_content_maps = self.target_maps[0]  # target content features
        target_content_gram = self.target_maps[1]  # target style Gram matrices
        content_loss = self.get_content_loss(current_content_maps, target_content_maps)
        style_loss = 0
        for j in range(6):
            if j == 5:
                # Index 4 (content map) was skipped when building the
                # target Gram list, so the target index shifts by one.
                style_loss += self.get_style_loss(current_maps[j], target_content_gram[j-1])
            if j < 4:
                style_loss += self.get_style_loss(current_maps[j], target_content_gram[j])
        style_loss /= 5
        # Total variation: L1 difference between horizontally and
        # vertically neighbouring pixels (smoothness prior).
        tv_loss = numpy.sum(numpy.abs(optimize_img[:, :, :, :-1] - optimize_img[:, :, :, 1:])) \
            + numpy.sum(numpy.abs(optimize_img[:, :, :-1, :] - optimize_img[:, :, 1:, :]))
        total_loss = content_loss * self.weight[0] + style_loss * self.weight[1] + tv_loss * self.weight[2]
        # NOTE(review): 130001 looks like an empirical scaling constant — confirm.
        return total_loss/130001
def load_parameters(file_name):
    """Load a checkpoint, dropping optimiser state and renaming layers.

    Keys starting with 'moments.' (Adam moment buffers) are skipped;
    keys of the form 'layers.N...' are renamed to 'lN...' so they match
    the attribute names of the Vgg19 cell.
    """
    param_dict_new = {}
    for key, values in load_checkpoint(file_name).items():
        if key.startswith('moments.'):
            continue  # optimiser state — not model weights
        if key.startswith("layers."):
            param_dict_new['l' + key[7:]] = values
        else:
            param_dict_new[key] = values
    return param_dict_new

@ -1,3 +0,0 @@
from django.db import models
# Create your models here.

Binary file not shown.

After

Width:  |  Height:  |  Size: 479 KiB

@ -0,0 +1,77 @@
# coding=utf-8
import os
# import moxing as mox
import numpy as np
import cv2 as cv
import mindspore
from mindspore import nn, Tensor, Parameter, context
from mindspore import load_checkpoint, load_param_into_net, save_checkpoint
from vgg import Vgg19
from method import create_dataset, load_image, gram_matrix, Optim_Loss, save_img, load_parameters
# 设置为动态图模式
import argparse
# parser = argparse.ArgumentParser(description='Training img transfer')
# parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'])
# parser.add_argument('--data_url', required=True, default=None)
# parser.add_argument('--train_url', required=True, default=None)
# # parser.add_argument("model_url", type=str, default=None, help="pretrained checkpoint file path of vgg.")
# args = parser.parse_known_args()[0]
# context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
# if not os.path.exists("./img_data"):
# os.mkdir("./img_data")
# mox.file.copy_parallel(args.data_url, "./img_data")
#
# if not os.path.exists('./output_img'):
# os.mkdir("./output_img")
# Build the VGG19 feature extractor and load pretrained weights at import time.
vgg = Vgg19()
param_dict = load_parameters("vgg19.ckpt")
print("ckpt load success")
load_param_into_net(vgg, param_dict)
print("param load success")
vgg.set_train(False)  # evaluation mode: freeze training-specific behaviour
def load_img_ckpt(path):
    """Load a saved image checkpoint (thin wrapper over load_parameters)."""
    params = load_parameters(path)
    return params
def optimize_picture(content_path, style_path, iter_num, learning_rate, output_path, mode):
    """Run iterative style transfer and checkpoint the optimised image.

    mode 0: start from the content image at *content_path*;
    mode 1: resume from an image checkpoint ("img" entry) at *content_path*.
    Intermediate images are dumped to *output_path* every 50 iterations,
    and the final image is saved as a checkpoint.
    """
    if mode == 0:
        optimize_img = load_image(content_path, 400)
        content_maps = vgg(Tensor(optimize_img))
        optimizing_param = Parameter(Tensor(optimize_img, mindspore.float32), "optimizing_img")
    else:
        optimize_img = load_img_ckpt(content_path)["img"]
        content_maps = vgg(Tensor(optimize_img))
        optimizing_param = Parameter(Tensor(optimize_img, mindspore.float32), "optimizing_img")
    style_img = load_image(style_path, 400)
    # Six feature maps are extracted; index 4 (conv4_2) is the content map.
    content_maps = content_maps[4].squeeze(axis=0)  # VGG19 features of the content image
    style_maps = list(vgg(Tensor(style_img)))  # features of the style image
    style_maps.pop(4)  # drop the content map — not used for style
    style_gram = list(gram_matrix(x) for x in style_maps)
    num_of_iterations = iter_num + 1  # number of optimisation steps
    loss_opt = Optim_Loss(vgg, [content_maps, style_gram, optimizing_param])
    cosine_decay_lr = nn.CosineDecayLR(0.05, 2.0, 2000)
    # NOTE(review): the learning_rate argument is ignored — the cosine
    # schedule above is used instead; confirm this is intentional.
    optimizer_n = nn.Adam([optimizing_param], learning_rate=cosine_decay_lr)
    train_net = nn.TrainOneStepCell(loss_opt, optimizer_n)
    train_net.set_train()
    for i in range(num_of_iterations):
        if i % 50 == 0:
            # Dump an intermediate image every 50 iterations.
            save_img(i, optimizer_n, output_path)
        loss = train_net()
        print(str(content_path[-13:-4]) + "_to_" + str(style_path[-8:-4]) + ":iteration_", str(i), ": loss", loss)
    final = optimizer_n.parameters
    # Persist the optimised image tensor as a checkpoint for later resumption.
    save_checkpoint([{"name": "img", "data": final[0]}], output_path+"/img_1.ckpt")
# Resume optimisation (mode=1) from a saved image checkpoint against a new
# style image. NOTE(review): paths and hyper-parameters are hard-coded and
# this runs on import — consider an `if __name__ == "__main__":` guard.
optimize_picture("output_img/iter_19/img.ckpt",
                 "transfer_img/trans_6/sty_fam_1.jpg", 2000, 0.05, "output_img/iter_19", 1)
# img = cv.imread("transfer_img/trans_5/style_img.jpg")[:, :, ::-1]
# print(img.shape)

@ -0,0 +1,54 @@
from mindspore import load_checkpoint, load_param_into_net
from mindspore import nn, context
def load_parameters(file_name):
    """Read a VGG checkpoint, filtering optimiser state and renaming keys.

    Entries under 'moments.*' (optimiser moment buffers) are discarded;
    'layers.N...' keys become 'lN...' to match Vgg19's attribute names.
    """
    renamed = {}
    for key, value in load_checkpoint(file_name).items():
        if key.startswith('moments.'):
            continue  # skip optimiser moment buffers
        new_key = 'l' + key[7:] if key.startswith("layers.") else key
        renamed[new_key] = value
    return renamed
class Vgg19(nn.Cell):
    """Truncated VGG19 feature extractor for style transfer.

    Attribute names ``lN`` presumably mirror the layer indices of the
    standard VGG19 feature stack (load_parameters maps 'layers.N'
    checkpoint keys onto these attributes) — TODO confirm against the
    checkpoint. construct() returns six intermediate activations rather
    than a classification output.
    """

    def __init__(self):
        super().__init__()
        # weight_init='ones' is a placeholder; real weights are loaded
        # afterwards via load_param_into_net (see run.py setup).
        self.l0 = nn.Conv2d(3, 64, kernel_size=3, weight_init='ones')
        self.l2 = nn.Conv2d(64, 64, kernel_size=3, weight_init='ones')
        self.l5 = nn.Conv2d(64, 128, kernel_size=3, weight_init='ones')
        self.l7 = nn.Conv2d(128, 128, kernel_size=3, weight_init='ones')
        self.l10 = nn.Conv2d(128, 256, kernel_size=3, weight_init='ones')
        self.l12 = nn.Conv2d(256, 256, kernel_size=3, weight_init='ones')
        self.l14 = nn.Conv2d(256, 256, kernel_size=3, weight_init='ones')
        self.l16 = nn.Conv2d(256, 256, kernel_size=3, weight_init='ones')
        self.l19 = nn.Conv2d(256, 512, kernel_size=3, weight_init='ones')
        self.l21 = nn.Conv2d(512, 512, kernel_size=3, weight_init='ones')
        self.l23 = nn.Conv2d(512, 512, kernel_size=3, weight_init='ones')
        self.l25 = nn.Conv2d(512, 512, kernel_size=3, weight_init='ones')
        self.l28 = nn.Conv2d(512, 512, kernel_size=3, weight_init='ones')
        self.l30 = nn.Conv2d(512, 512, kernel_size=3, weight_init='ones')
        self.l32 = nn.Conv2d(512, 512, kernel_size=3, weight_init='ones')
        self.l34 = nn.Conv2d(512, 512, kernel_size=3, weight_init='ones')
        self.relu = nn.ReLU()
        self.mp = nn.MaxPool2d(kernel_size=2, stride=2)
        # NOTE(review): flatten, l30, l32 and l34 are defined but never
        # used in construct() — confirm they can be removed.
        self.flatten = nn.Flatten()

    def construct(self, x):
        # Returns the activations used by the style-transfer loss:
        # conv blocks 1-5 plus the extra conv4_2 content map.
        layer_1 = self.relu(self.l0(x))  # 3 -> 64 channels
        layer_2 = self.relu(self.l5(self.mp(self.relu(self.l2(layer_1)))))
        layer_3 = self.relu(self.l10(self.relu(self.l7(self.mp(layer_2)))))
        layer_4 = self.relu(self.l19(self.mp(self.relu(self.l16(self.relu(self.l14(self.relu(self.l12(layer_3)))))))))
        layer_4_2 = self.relu(self.l21(layer_4))  # conv4_2: the content map
        layer_5 = self.relu(self.l28(self.mp(self.relu(self.l25(self.relu(self.l23(layer_4_2)))))))
        return [layer_1, layer_2, layer_3, layer_4, layer_4_2, layer_5]

@ -1,3 +1,84 @@
from django.shortcuts import render
import cv2
import numpy as np
from django.http import JsonResponse
import base64
import tensorflow
from django.views.decorators.http import require_POST
def base64tocv(imagecode):
    """Decode a base64 data-URL string into an OpenCV image.

    The front end sends images as ``data:image/...;base64,<payload>``;
    everything after the first comma is the base64 payload (joining the
    remainder tolerates stray commas inside it).

    :param imagecode: data-URL string (or anything str()-able to one)
    :return: decoded image as a numpy array (BGR), or None if decoding fails
    """
    parts = str(imagecode).split(',')
    b64_img = ''.join(parts[1:])
    img_bytes = base64.b64decode(b64_img)
    # np.fromstring is deprecated for binary data; frombuffer is the
    # supported zero-copy equivalent.
    nparr = np.frombuffer(img_bytes, np.uint8)
    # Fix: the original passed cv2.COLOR_RGB2BGR here, which is a cvtColor
    # conversion code, not an imread flag. IMREAD_COLOR (see the previously
    # commented-out line) is the intended flag: always decode to 3-channel BGR.
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    return image
# ndarray -> base64 string
def getbase64byndarray(pic_img):
    """Encode an image array as JPEG and return it base64-encoded.

    :param pic_img: image as a numpy ndarray (OpenCV BGR layout)
    :return: base64 string of the JPEG bytes
    """
    _, jpg_buf = cv2.imencode('.jpg', pic_img)
    return base64.b64encode(jpg_buf).decode()
@require_POST
def style_transfer(request):
    """Apply a pretrained neural style to an uploaded image.

    POST params:
        files -- base64 data-URL of the source image
        style -- numeric style code (key of d_model_map)

    :return: JsonResponse with the stylised image (base64 JPEG) plus the
             original height and width.
    """
    # Decode the uploaded image and record its dimensions.
    image = request.POST.get('files')
    img = base64tocv(image)
    (h, w) = img.shape[:2]
    # Load the pretrained Torch model for the requested style.
    net = get_model_from_style(request.POST.get('style'))
    # Build an input blob at the image's own size, subtracting the
    # ImageNet per-channel means the models were trained with.
    blob = cv2.dnn.blobFromImage(img, 1.0, (w, h), (103.939, 116.779, 123.680), swapRB=False, crop=False)
    net.setInput(blob)
    output = net.forward()
    # Reshape (1,3,H,W) -> (3,H,W), add the means back, then go to HWC.
    output = output.reshape((3, output.shape[2], output.shape[3]))
    output[0] += 103.939
    output[1] += 116.779
    output[2] += 123.680
    output = output.transpose(1, 2, 0)
    # Fix: the network output is float and can fall outside [0, 255];
    # encoding it raw with cv2.imencode misrenders the image. Clip and
    # cast to uint8 before JPEG encoding.
    output = np.clip(output, 0, 255).astype("uint8")
    output = getbase64byndarray(output)
    return JsonResponse(data={"image": output, "height": h, "width": w},
                        json_dumps_params={'ensure_ascii': False})
# Base directory holding the pretrained Torch style-transfer models.
# Fix: the original string relied on invalid escape sequences (\i, \s, \m)
# that only work because Python keeps unknown escapes literally — a
# DeprecationWarning today and a SyntaxError in future versions. Doubling
# the backslashes yields the exact same string, explicitly.
model_base_dir = "F:\\imageprocess\\imageProcess\\styleTransfer\\models\\"

# Maps the numeric style code sent by the front end to a model file name
# (resolved to <model_base_dir><name>.t7 by get_model_from_style).
d_model_map = {
    0: "candy",
    1: "udnie",
    2: "la_muse",
    3: "the_scream",
    4: "mosaic",
    5: "feathers",
    6: "starry_night"
}
def get_model_from_style(style):
    """
    Load the pretrained network for the given style code.

    :param style: style code (int, or a numeric string from the request)
    :return: an OpenCV dnn network read from the matching .t7 file
    """
    style_name = d_model_map[int(style)]
    return cv2.dnn.readNetFromTorch(model_base_dir + style_name + ".t7")
# Create your views here.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 187 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 348 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 248 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 431 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 447 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 352 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 346 KiB

@ -1,6 +1,7 @@
<template>
<div>
<el-container>
<el-button type="primary" plain @click="saveImg"></el-button>
<el-main style="padding:10px;margin-right:10px;border-radius: 10px;"><router-view :key="$route.fullPath"/>
<el-dialog title="图片上传" :visible.sync="dialogVisible" width="30%"
@ -30,22 +31,42 @@
<el-footer>
<el-dialog title="风格转换" :visible.sync="styleVisible" width="80%">
<img src='../assets/candy.png' height="100px" width="100px" class="exampleImage" @click="styleTransfer(0)"/>
<img src='../assets/la_muse.png' height="100px" width="100px" class="exampleImage" @click="styleTransfer(2)"/>
<img src='../assets/mosaic.png' height="100px" width="100px" class="exampleImage" @click="styleTransfer(4)"/>
<br>
<img v-bind:src="preview" height="300px" width="300px" />
<br>
<span>
<el-button @click="styleVisible = false"> </el-button>
<el-button type="primary" @click="styleVisible = false;image=preview"> </el-button>
</span>
</el-dialog>
<el-dialog title="尺寸" :visible.sync="sizeVisible" width="30%">
高度:<el-input-number v-model="tmpHeight" :precision="0" :min="1" step="1" :max="100000" ></el-input-number><br />
宽度:<el-input-number v-model="tmpWidth" :precision="0" :min="1" step="1" :max="100000" ></el-input-number><br />
<span>
<el-button @click="sizeVisible = false"> </el-button>
<el-button type="primary" @click="submitSize"> </el-button>
<el-button type="primary" @click="submitSize()"> </el-button>
</span>
</el-dialog>
<el-menu class="el-menu-demo" mode="horizontal">
<el-menu-item index="1" @click="clickMenu(1)"></el-menu-item>
<el-menu-item index="1" @click="clickMenu(1)"></el-menu-item>
<el-menu-item index="2" @click="clickMenu(2)"></el-menu-item>
<el-menu-item index="3" @click="clickMenu(3)"></el-menu-item>
<el-menu-item index="4" @click="clickMenu(4)"></el-menu-item>
<el-menu-item index="4" @click="clickMenu(5)"></el-menu-item>
<el-menu-item index="4" @click="clickMenu(6)"></el-menu-item>
<el-menu-item index="5" @click="clickMenu(5)"></el-menu-item>
<el-menu-item index="6" @click="clickMenu(6)"></el-menu-item>
<el-menu-item index="7" @click="clickMenu(7)"></el-menu-item>
<el-menu-item index="8" @click="clickMenu(8)"></el-menu-item>
<el-menu-item index="9" @click="clickMenu(9)"></el-menu-item>
<el-menu-item index="10" @click="clickMenu(10)"></el-menu-item>
<el-menu-item index="11" @click="clickMenu(11)"></el-menu-item>
<el-menu-item index="12" @click="clickMenu(12)"></el-menu-item>
<el-menu-item index="13" @click="clickMenu(13)"></el-menu-item>
</el-menu>
</el-footer>
</el-container>
@ -111,6 +132,7 @@ import axios from "axios"
export default {
data() {
return {
styleVisible: false,
dialogVisible: true,
sizeVisible: false,
imageUrls: [
@ -118,12 +140,17 @@ export default {
'https://fuss10.elemecdn.com/1/34/19aa98b1fcb2781c4fba33d850549jpeg.jpeg',
'https://fuss10.elemecdn.com/0/6f/e35ff375812e6b0020b6b4e8f9583jpeg.jpeg',
],
styleUrls: [
'../assets/candy.png',
],
image: "",
preview: "",
heightInput: "",
widthInput:"",
tmpHeight:"",
tmpWidth:"",
fileList: [],
str: "https://huashi-1305159828.cos.ap-shanghai.myqcloud.com/202110269108S.%E5%8A%B3%E5%8A%A8%E6%AD%A3%E4%B9%89%EF%BC%9A%E5%8A%B3%E5%8A%A8%E5%B9%B8%E7%A6%8F%E4%B8%8D%E5%8F%AF%E6%88%96%E7%BC%BA%E7%9A%84%E4%BB%B7%E5%80%BC%E6%94%AF%E6%92%91_%E6%AF%9B%E5%8B%92%E5%A0%82.pdf"
}
},
methods: {
@ -184,6 +211,42 @@ export default {
formdata.append("direction",'right')
axios.post("/rotate/", formdata).then(res => { var result=res.data;this.image="data:image/png;base64," + result.image; })
}
else if(menuIndex==7){
let formdata = new FormData()
formdata.append("files",this.image)
axios.post("/keying/", formdata).then(res => { var result=res.data;this.image="data:image/png;base64," + result.image; })
}
else if(menuIndex==8){
this.styleVisible=true;
this.preview=this.image;
}
else if(menuIndex==9){
let formdata = new FormData()
formdata.append("files",this.image)
axios.post("/smooth/", formdata).then(res => { var result=res.data;this.image="data:image/png;base64," + result.image; })
}
else if(menuIndex==10){
let formdata = new FormData()
formdata.append("files",this.image)
axios.post("/sharpen/", formdata).then(res => { var result=res.data;this.image="data:image/png;base64," + result.image; })
}
else if(menuIndex==11){
let formdata = new FormData()
formdata.append("files",this.image)
axios.post("/exposure/", formdata).then(res => { var result=res.data;this.image="data:image/png;base64," + result.image; })
}
else if(menuIndex==12){
let formdata = new FormData()
formdata.append("files",this.image)
axios.post("/barCode/", formdata).then(res => { var result=res.data;this.image="data:image/png;base64," + result.image; })
}
else if(menuIndex==13){
let formdata = new FormData()
formdata.append("files",this.image)
axios.post("/removeNoise/", formdata).then(res => { var result=res.data;this.image="data:image/png;base64," + result.image; })
}
},
submitSize(){
let formdata = new FormData()
@ -193,7 +256,31 @@ export default {
this.sizeVisible=false;
axios.post("/resize/", formdata).then(res => { var result=res.data;this.image="data:image/png;base64," + result.image;
this.tmpHeight=result.height;this.tmpWidth=result.width; })
}
},
styleTransfer(index){
let formdata = new FormData()
formdata.append("files",this.image)
formdata.append("style",index)
axios.post("/styleTransfer/", formdata).then(res => { var result=res.data;this.preview="data:image/png;base64," + result.image; })
},
saveImg() {
var base64 = this.image;
var byteCharacters = atob(
base64.replace(/^data:image\/(png|jpeg|jpg);base64,/, "")
);
var byteNumbers = new Array(byteCharacters.length);
for (var i = 0; i < byteCharacters.length; i++) {
byteNumbers[i] = byteCharacters.charCodeAt(i);
}
var byteArray = new Uint8Array(byteNumbers);
var blob = new Blob([byteArray], {
type: undefined,
});
var aLink = document.createElement("a");
aLink.download = "zy&klx图片在线处理.jpg"; //
aLink.href = URL.createObjectURL(blob);
aLink.click();
},
},
mounted() {
},

Loading…
Cancel
Save