from operator import xor
from pywt import dwt2, idwt2
import cv2
import base64
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from django.core.validators import RegexValidator
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.shortcuts import render, HttpResponse, redirect
from django.views.decorators.csrf import csrf_exempt
from firstapp.utils.bootstrap import BootstrapForm, BootstrapModelForm
from firstapp import models
from django import forms
import time
# Create your views here.
def index(request):
    return render(request, "index.html")
def canny_style(request):
    def style_transfer(
            pathOut='',
            model='',
            width=None,
            jpg_quality=80):
        '''
        pathOut: path where the stylized image is saved
        model: path to the pre-trained model
        width: width of the stylized image; defaults to None, i.e. the original size
        jpg_quality: 0-100, quality of the output JPEG; defaults to 80, higher is better
        '''
        image = request.FILES.get('img')
        if image:
            img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
            ## Read the uploaded image, resize it to the requested width, then record its size
            (h, w) = img.shape[:2]
            if width is not None:
                img = cv2.resize(img, (width, round(width * h / w)), interpolation=cv2.INTER_CUBIC)
                (h, w) = img.shape[:2]
            ## Load the pre-trained model from disk
            print('Loading the pre-trained model......')
            net = cv2.dnn.readNetFromTorch(model)
            ## Build a blob from the image: set its size and subtract the per-channel means
            ## (the channel means over all ImageNet training samples), then run one
            ## forward pass and report how long it took
            blob = cv2.dnn.blobFromImage(img, 1.0, (w, h), (103.939, 116.779, 123.680), swapRB=False, crop=False)
            net.setInput(blob)
            start = time.time()
            output = net.forward()
            end = time.time()
            print("Style transfer took: {:.2f}s".format(end - start))
            ## Reshape the output, add the subtracted means back, and reorder the color channels
            output = output.reshape((3, output.shape[2], output.shape[3]))
            output[0] += 103.939
            output[1] += 116.779
            output[2] += 123.680
            output = output.transpose(1, 2, 0)
            ## Write out the stylized image
            cv2.imwrite(pathOut, output, [int(cv2.IMWRITE_JPEG_QUALITY), jpg_quality])
    if request.method == "POST":
        ## Test
        import glob
        models = glob.glob('./*/*/*.t7')  # list all available pre-trained models
        pathOut = './result/result_img99.jpg'
        model = './models/instance_norm/candy.t7'
        style_transfer(pathOut, model, width=500)
        result = cv2.imread("result/result_img99.jpg")
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "canny_style.html", {'result': result})
    return render(request, "canny_style.html")
def feather_style(request):
    def style_transfer(
            pathOut='',
            model='',
            width=None,
            jpg_quality=80):
        '''
        pathOut: path where the stylized image is saved
        model: path to the pre-trained model
        width: width of the stylized image; defaults to None, i.e. the original size
        jpg_quality: 0-100, quality of the output JPEG; defaults to 80, higher is better
        '''
        image = request.FILES.get('img')
        if image:
            img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
            ## Read the uploaded image, resize it to the requested width, then record its size
            (h, w) = img.shape[:2]
            if width is not None:
                img = cv2.resize(img, (width, round(width * h / w)), interpolation=cv2.INTER_CUBIC)
                (h, w) = img.shape[:2]
            ## Load the pre-trained model from disk
            print('Loading the pre-trained model......')
            net = cv2.dnn.readNetFromTorch(model)
            ## Build a blob from the image: set its size and subtract the per-channel means
            ## (the channel means over all ImageNet training samples), then run one
            ## forward pass and report how long it took
            blob = cv2.dnn.blobFromImage(img, 1.0, (w, h), (103.939, 116.779, 123.680), swapRB=False, crop=False)
            net.setInput(blob)
            start = time.time()
            output = net.forward()
            end = time.time()
            print("Style transfer took: {:.2f}s".format(end - start))
            ## Reshape the output, add the subtracted means back, and reorder the color channels
            output = output.reshape((3, output.shape[2], output.shape[3]))
            output[0] += 103.939
            output[1] += 116.779
            output[2] += 123.680
            output = output.transpose(1, 2, 0)
            ## Write out the stylized image
            cv2.imwrite(pathOut, output, [int(cv2.IMWRITE_JPEG_QUALITY), jpg_quality])
    if request.method == "POST":
        ## Test
        import glob
        models = glob.glob('./*/*/*.t7')  # list all available pre-trained models
        pathOut = './result/result_img98.jpg'
        model = './models/instance_norm/feathers.t7'
        style_transfer(pathOut, model, width=500)
        result = cv2.imread("result/result_img98.jpg")
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "feather_style.html", {'result': result})
    return render(request, "feather_style.html")
def la_muse_style(request):
    def style_transfer(
            pathOut='',
            model='',
            width=None,
            jpg_quality=80):
        '''
        pathOut: path where the stylized image is saved
        model: path to the pre-trained model
        width: width of the stylized image; defaults to None, i.e. the original size
        jpg_quality: 0-100, quality of the output JPEG; defaults to 80, higher is better
        '''
        image = request.FILES.get('img')
        if image:
            img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
            ## Read the uploaded image, resize it to the requested width, then record its size
            (h, w) = img.shape[:2]
            if width is not None:
                img = cv2.resize(img, (width, round(width * h / w)), interpolation=cv2.INTER_CUBIC)
                (h, w) = img.shape[:2]
            ## Load the pre-trained model from disk
            print('Loading the pre-trained model......')
            net = cv2.dnn.readNetFromTorch(model)
            ## Build a blob from the image: set its size and subtract the per-channel means
            ## (the channel means over all ImageNet training samples), then run one
            ## forward pass and report how long it took
            blob = cv2.dnn.blobFromImage(img, 1.0, (w, h), (103.939, 116.779, 123.680), swapRB=False, crop=False)
            net.setInput(blob)
            start = time.time()
            output = net.forward()
            end = time.time()
            print("Style transfer took: {:.2f}s".format(end - start))
            ## Reshape the output, add the subtracted means back, and reorder the color channels
            output = output.reshape((3, output.shape[2], output.shape[3]))
            output[0] += 103.939
            output[1] += 116.779
            output[2] += 123.680
            output = output.transpose(1, 2, 0)
            ## Write out the stylized image
            cv2.imwrite(pathOut, output, [int(cv2.IMWRITE_JPEG_QUALITY), jpg_quality])
    if request.method == "POST":
        ## Test
        import glob
        models = glob.glob('./*/*/*.t7')  # list all available pre-trained models
        pathOut = './result/result_img97.jpg'
        model = './models/instance_norm/la_muse.t7'
        style_transfer(pathOut, model, width=500)
        result = cv2.imread("result/result_img97.jpg")
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "la_muse_style.html", {'result': result})
    return render(request, "la_muse_style.html")
def mosaic_style(request):
    def style_transfer(
            pathOut='',
            model='',
            width=None,
            jpg_quality=80):
        '''
        pathOut: path where the stylized image is saved
        model: path to the pre-trained model
        width: width of the stylized image; defaults to None, i.e. the original size
        jpg_quality: 0-100, quality of the output JPEG; defaults to 80, higher is better
        '''
        image = request.FILES.get('img')
        if image:
            img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
            ## Read the uploaded image, resize it to the requested width, then record its size
            (h, w) = img.shape[:2]
            if width is not None:
                img = cv2.resize(img, (width, round(width * h / w)), interpolation=cv2.INTER_CUBIC)
                (h, w) = img.shape[:2]
            ## Load the pre-trained model from disk
            print('Loading the pre-trained model......')
            net = cv2.dnn.readNetFromTorch(model)
            ## Build a blob from the image: set its size and subtract the per-channel means
            ## (the channel means over all ImageNet training samples), then run one
            ## forward pass and report how long it took
            blob = cv2.dnn.blobFromImage(img, 1.0, (w, h), (103.939, 116.779, 123.680), swapRB=False, crop=False)
            net.setInput(blob)
            start = time.time()
            output = net.forward()
            end = time.time()
            print("Style transfer took: {:.2f}s".format(end - start))
            ## Reshape the output, add the subtracted means back, and reorder the color channels
            output = output.reshape((3, output.shape[2], output.shape[3]))
            output[0] += 103.939
            output[1] += 116.779
            output[2] += 123.680
            output = output.transpose(1, 2, 0)
            ## Write out the stylized image
            cv2.imwrite(pathOut, output, [int(cv2.IMWRITE_JPEG_QUALITY), jpg_quality])
    if request.method == "POST":
        ## Test
        import glob
        models = glob.glob('./*/*/*.t7')  # list all available pre-trained models
        pathOut = './result/result_img96.jpg'
        model = './models/instance_norm/mosaic.t7'
        style_transfer(pathOut, model, width=500)
        result = cv2.imread("result/result_img96.jpg")
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "mosaic_style.html", {'result': result})
    return render(request, "mosaic_style.html")
def starry_style(request):
    def style_transfer(
            pathOut='',
            model='',
            width=None,
            jpg_quality=80):
        '''
        pathOut: path where the stylized image is saved
        model: path to the pre-trained model
        width: width of the stylized image; defaults to None, i.e. the original size
        jpg_quality: 0-100, quality of the output JPEG; defaults to 80, higher is better
        '''
        image = request.FILES.get('img')
        if image:
            img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
            ## Read the uploaded image, resize it to the requested width, then record its size
            (h, w) = img.shape[:2]
            if width is not None:
                img = cv2.resize(img, (width, round(width * h / w)), interpolation=cv2.INTER_CUBIC)
                (h, w) = img.shape[:2]
            ## Load the pre-trained model from disk
            print('Loading the pre-trained model......')
            net = cv2.dnn.readNetFromTorch(model)
            ## Build a blob from the image: set its size and subtract the per-channel means
            ## (the channel means over all ImageNet training samples), then run one
            ## forward pass and report how long it took
            blob = cv2.dnn.blobFromImage(img, 1.0, (w, h), (103.939, 116.779, 123.680), swapRB=False, crop=False)
            net.setInput(blob)
            start = time.time()
            output = net.forward()
            end = time.time()
            print("Style transfer took: {:.2f}s".format(end - start))
            ## Reshape the output, add the subtracted means back, and reorder the color channels
            output = output.reshape((3, output.shape[2], output.shape[3]))
            output[0] += 103.939
            output[1] += 116.779
            output[2] += 123.680
            output = output.transpose(1, 2, 0)
            ## Write out the stylized image
            cv2.imwrite(pathOut, output, [int(cv2.IMWRITE_JPEG_QUALITY), jpg_quality])
    if request.method == "POST":
        ## Test
        import glob
        models = glob.glob('./*/*/*.t7')  # list all available pre-trained models
        pathOut = './result/result_img95.jpg'
        model = './models/instance_norm/starry_night.t7'
        style_transfer(pathOut, model, width=500)
        result = cv2.imread("result/result_img95.jpg")
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "starry_style.html", {'result': result})
    return render(request, "starry_style.html")
def the_scream(request):
    def style_transfer(
            pathOut='',
            model='',
            width=None,
            jpg_quality=80):
        '''
        pathOut: path where the stylized image is saved
        model: path to the pre-trained model
        width: width of the stylized image; defaults to None, i.e. the original size
        jpg_quality: 0-100, quality of the output JPEG; defaults to 80, higher is better
        '''
        image = request.FILES.get('img')
        if image:
            img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
            ## Read the uploaded image, resize it to the requested width, then record its size
            (h, w) = img.shape[:2]
            if width is not None:
                img = cv2.resize(img, (width, round(width * h / w)), interpolation=cv2.INTER_CUBIC)
                (h, w) = img.shape[:2]
            ## Load the pre-trained model from disk
            print('Loading the pre-trained model......')
            net = cv2.dnn.readNetFromTorch(model)
            ## Build a blob from the image: set its size and subtract the per-channel means
            ## (the channel means over all ImageNet training samples), then run one
            ## forward pass and report how long it took
            blob = cv2.dnn.blobFromImage(img, 1.0, (w, h), (103.939, 116.779, 123.680), swapRB=False, crop=False)
            net.setInput(blob)
            start = time.time()
            output = net.forward()
            end = time.time()
            print("Style transfer took: {:.2f}s".format(end - start))
            ## Reshape the output, add the subtracted means back, and reorder the color channels
            output = output.reshape((3, output.shape[2], output.shape[3]))
            output[0] += 103.939
            output[1] += 116.779
            output[2] += 123.680
            output = output.transpose(1, 2, 0)
            ## Write out the stylized image
            cv2.imwrite(pathOut, output, [int(cv2.IMWRITE_JPEG_QUALITY), jpg_quality])
    if request.method == "POST":
        ## Test
        import glob
        models = glob.glob('./*/*/*.t7')  # list all available pre-trained models
        pathOut = './result/result_img94.jpg'
        model = './models/instance_norm/the_scream.t7'
        style_transfer(pathOut, model, width=500)
        result = cv2.imread("result/result_img94.jpg")
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "the_scream.html", {'result': result})
    return render(request, "the_scream.html")
def none(request):
    return redirect('/canny/style/')
def base_draw(request):
    def hhds_test():
        # Task 1: use numpy to create an 800*800*3 all-black image of type uint8
        img = np.zeros([800, 800, 3], np.uint8)
        # Task 2: draw a green line, 20 px wide, from (400, 0) to (0, 400)
        cv2.line(img, (400, 0), (0, 400), (0, 255, 0), 20)
        # Task 3: draw a white rectangle, 5 px wide, from corner (128, 128) to corner (512, 512)
        cv2.rectangle(img, (128, 128), (512, 512), (255, 255, 255), 5)
        # Task 4: draw a filled yellow circle: center (512, 512), radius 128, yellow (0, 255, 255)
        cv2.circle(img, (512, 512), 128, (0, 255, 255), -1)
        # Task 5: draw a filled cyan ellipse: center (256, 700), axes lengths (128, 64),
        # rotation angle 0, arc start angle 0, arc end angle 360, color (255, 255, 0)
        cv2.ellipse(img, (256, 700), (128, 64), 0, 0, 360, (255, 255, 0), -1)
        # data holds the polygon vertex coordinates
        data = [[501, 689], [290, 399], [709, 247], [625, 252]]
        # Task 6: draw a closed polygon in purple (255, 0, 255)
        pts = np.array(data, np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.polylines(img, [pts], True, (255, 0, 255))
        font = cv2.FONT_HERSHEY_SCRIPT_COMPLEX
        # Task 7: add the text "educoder": bottom-left corner at (10, 100), font `font`,
        # scale 4, white, thickness 2, line type LINE_AA
        cv2.putText(img, "educoder", (10, 100), font, 4, (255, 255, 255), 2, cv2.LINE_AA)
        return img
    img = hhds_test()
    b64str = cv2.imencode('.png', img)[1].tobytes()
    b64str = base64.b64encode(b64str)
    b64str = b64str.decode()
    image = request.FILES.get('img')
    return render(request, "base_draw.html", {'b64str': b64str})
def RGB_draw(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # Split out the single channels (OpenCV stores pixels as B, G, R)
        b = img[:, :, 0]
        b_b64str = cv2.imencode('.png', b)[1].tobytes()
        b_b64str = base64.b64encode(b_b64str)
        b_b64str = b_b64str.decode()
        g = img[:, :, 1]
        g_b64str = cv2.imencode('.png', g)[1].tobytes()
        g_b64str = base64.b64encode(g_b64str)
        g_b64str = g_b64str.decode()
        r = img[:, :, 2]
        r_b64str = cv2.imencode('.png', r)[1].tobytes()
        r_b64str = base64.b64encode(r_b64str)
        r_b64str = r_b64str.decode()
        # Stretch the blue channel so its maximum reaches 255, then binarize it
        if np.max(b) > 100:
            b = b * (255 / np.max(b))
        binary_output = np.zeros_like(b)
        binary_output[((b > 90) & (b <= 230))] = 255
        binary_output = cv2.imencode('.png', binary_output)[1].tobytes()
        binary_output = base64.b64encode(binary_output)
        binary_output = binary_output.decode()
        context = {
            'b_b64str': b_b64str,
            'g_b64str': g_b64str,
            'r_b64str': r_b64str,
            'binary_output': binary_output
        }
        return render(request, "RGB_draw.html", context)
    return render(request, "RGB_draw.html")
def HSV_draw(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h = hsv[:, :, 0]
        h_b64str = cv2.imencode('.png', h)[1].tobytes()
        h_b64str = base64.b64encode(h_b64str)
        h_b64str = h_b64str.decode()
        s = hsv[:, :, 1]
        s_b64str = cv2.imencode('.png', s)[1].tobytes()
        s_b64str = base64.b64encode(s_b64str)
        s_b64str = s_b64str.decode()
        v = hsv[:, :, 2]
        v_b64str = cv2.imencode('.png', v)[1].tobytes()
        v_b64str = base64.b64encode(v_b64str)
        v_b64str = v_b64str.decode()
        # Lower HSV bound
        low_hsv = np.array([0, 0, 0])
        # Upper HSV bound
        high_hsv = np.array([200, 40, 100])
        # Keep only the pixels inside the bounds (binary mask)
        dst = cv2.inRange(hsv, low_hsv, high_hsv)
        dst = cv2.imencode('.png', dst)[1].tobytes()
        dst = base64.b64encode(dst)
        dst = dst.decode()
        context = {
            'h_b64str': h_b64str,
            's_b64str': s_b64str,
            'v_b64str': v_b64str,
            'dst': dst
        }
        return render(request, "HSV_draw.html", context)
    return render(request, "HSV_draw.html")
def and_logic(request):
    if request.method == "POST":
        # Read the images as grayscale
        X = cv2.imread('firstapp/static/img/image1.png', 0)
        Y = cv2.imread('firstapp/static/img/image2.png', 0)
        # Task 1: bitwise AND of X and Y
        ########## Begin ##########
        result = cv2.bitwise_and(X, Y)
        ########## End ##########
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "and_logic.html", {'result': result})
    else:
        return render(request, "and_logic.html")
def or_logic(request):
    if request.method == "POST":
        # Read the images as grayscale
        X = cv2.imread('firstapp/static/img/image1.png', 0)
        Y = cv2.imread('firstapp/static/img/image2.png', 0)
        # Task 1: bitwise OR of X and Y
        ########## Begin ##########
        result = cv2.bitwise_or(X, Y)
        ########## End ##########
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "or_logic.html", {'result': result})
    else:
        return render(request, "or_logic.html")
def not_logic(request):
    if request.method == "POST":
        # Read the image as grayscale
        X = cv2.imread('firstapp/static/img/image1.png', 0)
        # Task 1: bitwise NOT of X
        ########## Begin ##########
        result = cv2.bitwise_not(X)
        ########## End ##########
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "not_logic.html", {'result': result})
    else:
        return render(request, "not_logic.html")
def add_logic(request):
    if request.method == "POST":
        # Read the images in color (flag 1)
        X = cv2.imread('firstapp/static/img/add1.png', 1)
        Y = cv2.imread('firstapp/static/img/add2.png', 1)
        # Task 1: add X and Y
        ########## Begin ##########
        result = cv2.add(X, Y)
        ########## End ##########
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "add_logic.html", {'result': result})
    else:
        return render(request, "add_logic.html")
def sub_logic(request):
    if request.method == "POST":
        # Read the images in color (flag 1)
        X = cv2.imread('firstapp/static/img/sub1.png', 1)
        Y = cv2.imread('firstapp/static/img/sub2.png', 1)
        # Task 1: subtract Y from X
        ########## Begin ##########
        result = cv2.subtract(X, Y)
        ########## End ##########
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "sub_logic.html", {'result': result})
    else:
        return render(request, "sub_logic.html")
def multi_logic(request):
    if request.method == "POST":
        # Read the images in color and scale them to [0, 1]
        X = cv2.imread('firstapp/static/img/multi1.png', 1).astype(np.float64) / 255
        Y = cv2.imread('firstapp/static/img/multi2.png', 1).astype(np.float64) / 255
        result = cv2.multiply(X, Y) * 255
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "multi_logic.html", {'result': result})
    else:
        return render(request, "multi_logic.html")
def divide_logic(request):
    if request.method == "POST":
        # Read the images in color and scale them to [0, 1]
        X = cv2.imread('firstapp/static/img/multi1.png', 1).astype(np.float32) / 255
        Y = cv2.imread('firstapp/static/img/multi2.png', 1).astype(np.float32) / 255
        # Task 1: divide X by Y
        ########## Begin ##########
        result = cv2.divide(X, Y) * 255
        ########## End ##########
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, "divide_logic.html", {'result': result})
    else:
        return render(request, "divide_logic.html")
def expend(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # Rescale the image (fx=fy=0.5 halves each side; use 2 to double it)
        result1 = cv2.resize(img, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
        result1 = cv2.imencode('.png', result1)[1].tobytes()
        result1 = base64.b64encode(result1)
        result1 = result1.decode()
        # Translate the image 30 px right and 60 px down
        height, width, channel = img.shape
        M = np.float32([[1, 0, 30], [0, 1, 60]])
        result2 = cv2.warpAffine(img, M, (width, height))
        result2 = cv2.imencode('.png', result2)[1].tobytes()
        result2 = base64.b64encode(result2)
        result2 = result2.decode()
        # Rotate the image 45 degrees about its center
        rows, cols, depth = img.shape
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 45, 1)
        dst = cv2.warpAffine(img, M, (width, height))
        result3 = cv2.imencode('.png', dst)[1].tobytes()
        result3 = base64.b64encode(result3)
        result3 = result3.decode()
        context = {
            'result1': result1,
            'result2': result2,
            'result3': result3
        }
        return render(request, "expend.html", context)
    return render(request, "expend.html")
def turn(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # Resize the image
        src = cv2.resize(img, (256, 256))
        # Horizontal mirror
        horizontal = cv2.flip(src, 1, dst=None)
        result1 = cv2.imencode('.png', horizontal)[1].tobytes()
        result1 = base64.b64encode(result1)
        result1 = result1.decode()
        # Vertical mirror
        vertical = cv2.flip(src, 0, dst=None)
        result2 = cv2.imencode('.png', vertical)[1].tobytes()
        result2 = base64.b64encode(result2)
        result2 = result2.decode()
        # Mirror across both axes, and save
        cross = cv2.flip(src, -1, dst=None)
        result3 = cv2.imencode('.png', cross)[1].tobytes()
        result3 = base64.b64encode(result3)
        result3 = result3.decode()
        context = {
            'result1': result1,
            'result2': result2,
            'result3': result3
        }
        return render(request, "turn.html", context)
    return render(request, "turn.html")
def affine(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # Resize the image
        src = cv2.resize(img, (256, 256))
        # Get the image shape
        rows, cols = src.shape[: 2]
        ########Begin########
        # Set up the affine transformation matrix from three point pairs
        pos1 = np.float32([[50, 50], [200, 50], [50, 200]])
        pos2 = np.float32([[10, 100], [200, 50], [100, 250]])
        M = cv2.getAffineTransform(pos1, pos2)
        # Apply the affine transform, and save the result
        result = cv2.warpAffine(src, M, (rows, cols))
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        context = {
            'result': result,
        }
        return render(request, "affine.html", context)
    return render(request, "affine.html")
def gray_histogram(request):
    # Histogram helper: img is the input image, img_plt is the output file for the grayscale histogram
    # def histCover(img, fileName):
    #     plt.figure(fileName, figsize=(16, 8))
    #     # Show the input image
    #     plt.subplot(121)
    #     plt.imshow(img, "gray")
    #     # Show the histogram
    #     plt.subplot(122)
    #     """
    #     Task 1: compute the grayscale histogram with the built-in cv2.calcHist(); its return value is hist
    #     """
    #     ########## Begin ##########
    #     hist = cv2.calcHist([img], [0], None, [256], [0, 255])
    #
    #     ########## End ##########
    #     print('max:', max(hist))
    #     print('min:', min(hist))
    #     plt.plot(hist)
    #     plt.xlim([0, 255])
    #     plt.savefig(fileName)
    #     plt.show()
    #
    #
    # img_path = 'E:/imageProcess/firstapp/static/img/a.png'
    # img_plt = './f.png'
    # """
    # Task 2: read the image and convert it to grayscale; the path is given as img_path
    # """
    # ########## Begin ##########
    # img = Image.open(img_path)
    # img = np.array(img)
    # img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    #
    # ########## End ##########
    # """
    # Task 3: call histCover to draw the histogram (note that it returns nothing)
    # """
    # ########## Begin ##########
    # histCover(img_gray, img_plt)
    # return render(request, "gray_histogram.html")
    if request.method == "POST":
        img = cv2.imread("f.png", 0)
        f = cv2.imencode('.png', img)[1].tobytes()
        f = base64.b64encode(f)
        f = f.decode()
        return render(request, "gray_histogram.html", {'f': f})
    return render(request, "gray_histogram.html")
def fourier(request):
    # img = cv2.imread("E:/imageProcess/firstapp/static/img/a.png", 0)
    # dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    # dftShift = np.fft.fftshift(dft)
    # result = 20 * np.log(cv2.magnitude(dftShift[:, :, 0], dftShift[:, :, 1]))
    #
    # plt.subplot(121)
    # plt.imshow(img, cmap='gray')
    # plt.title('original')
    # plt.axis('off')
    #
    # plt.subplot(122)
    # plt.imshow(result, cmap='gray')
    # plt.title('result')
    # plt.axis('off')
    # plt.savefig("./e.png")
    # plt.show()
    if request.method == "POST":
        img = cv2.imread("e.png", 0)
        e = cv2.imencode('.png', img)[1].tobytes()
        e = base64.b64encode(e)
        e = e.decode()
        return render(request, "fourier.html", {'e': e})
    else:
        return render(request, "fourier.html")
def edge_detect(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # Convert to grayscale
        img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img_gray1 = cv2.imencode('.png', img_gray)[1].tobytes()
        img_gray1 = base64.b64encode(img_gray1)
        img_gray1 = img_gray1.decode()
        h, w = img_gray.shape
        gradient = np.zeros((h, w))
        # Compute the image gradient with forward differences
        img_gray = img_gray.astype('float')
        for i in range(h - 1):
            for j in range(w - 1):
                gx = abs(img_gray[i + 1, j] - img_gray[i, j])
                gy = abs(img_gray[i, j + 1] - img_gray[i, j])
                gradient[i, j] = gx + gy
        # 3. Sharpen the image by adding the gradient; the result is named sharp
        sharp = img_gray + gradient
        ########## End ##########
        # Clip the result to the valid [0, 255] range
        sharp = np.where(sharp > 255, 255, sharp)
        sharp = np.where(sharp < 0, 0, sharp)
        gradient = cv2.imencode('.png', gradient)[1].tobytes()
        gradient = base64.b64encode(gradient)
        gradient = gradient.decode()
        sharp = cv2.imencode('.png', sharp)[1].tobytes()
        sharp = base64.b64encode(sharp)
        sharp = sharp.decode()
        context = {
            'img_gray': img_gray1,
            'gradient': gradient,
            'sharp': sharp
        }
        return render(request, "edge_detect.html", context)
    return render(request, "edge_detect.html")
def roberts(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # Convert to grayscale
        img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img_gray1 = cv2.imencode('.png', img_gray)[1].tobytes()
        img_gray1 = base64.b64encode(img_gray1)
        img_gray1 = img_gray1.decode()
        # 2. Roberts operator
        # Define the convolution kernels with numpy
        kernelx = np.array([[-1, 0], [0, 1]], dtype=int)
        kernely = np.array([[0, -1], [1, 0]], dtype=int)
        # 3. Convolve
        x = cv2.filter2D(img, cv2.CV_16S, kernelx)
        y = cv2.filter2D(img, cv2.CV_16S, kernely)
        # 4. Convert back to uint8
        absX = cv2.convertScaleAbs(x)
        absX1 = cv2.imencode('.png', absX)[1].tobytes()
        absX1 = base64.b64encode(absX1)
        absX1 = absX1.decode()
        absY = cv2.convertScaleAbs(y)
        absY1 = cv2.imencode('.png', absY)[1].tobytes()
        absY1 = base64.b64encode(absY1)
        absY1 = absY1.decode()
        # Combine the two directional Roberts responses
        Roberts = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
        roberts1 = cv2.imencode('.png', Roberts)[1].tobytes()
        roberts1 = base64.b64encode(roberts1)
        roberts1 = roberts1.decode()
        context = {
            'img_gray': img_gray1,
            'absX': absX1,
            'absY': absY1,
            'roberts': roberts1
        }
        return render(request, "roberts.html", context)
    return render(request, "roberts.html")
def prewitt(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # 1. Convert to grayscale
        img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img_gray1 = cv2.imencode('.png', img_gray)[1].tobytes()
        img_gray1 = base64.b64encode(img_gray1)
        img_gray1 = img_gray1.decode()
        # 2. Apply the Sobel operator in x and y
        x = cv2.Sobel(img_gray, cv2.CV_16S, 1, 0)
        y = cv2.Sobel(img_gray, cv2.CV_16S, 0, 1)
        # 3. Convert back to uint8
        absX = cv2.convertScaleAbs(x)
        absX1 = cv2.imencode('.png', absX)[1].tobytes()
        absX1 = base64.b64encode(absX1)
        absX1 = absX1.decode()
        absY = cv2.convertScaleAbs(y)
        absY1 = cv2.imencode('.png', absY)[1].tobytes()
        absY1 = base64.b64encode(absY1)
        absY1 = absY1.decode()
        # 4. Combine the two directions
        Roberts = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
        roberts1 = cv2.imencode('.png', Roberts)[1].tobytes()
        roberts1 = base64.b64encode(roberts1)
        roberts1 = roberts1.decode()
        context = {
            'img_gray': img_gray1,
            'absX': absX1,
            'absY': absY1,
            'roberts': roberts1
        }
        return render(request, "prewitt.html", context)
    return render(request, "prewitt.html")
def laplacian(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # 1. Convert to grayscale
        grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # 2. Gaussian blur
        img_gaussianBlur = cv2.GaussianBlur(grayImage, (5, 5), 0)
        # 3. Laplacian operator
        dst = cv2.Laplacian(img_gaussianBlur, cv2.CV_16S, ksize=3)
        # 4. Convert back to uint8
        Laplacian = cv2.convertScaleAbs(dst)
        Laplacian = cv2.imencode('.png', Laplacian)[1].tobytes()
        Laplacian = base64.b64encode(Laplacian)
        Laplacian = Laplacian.decode()
        context = {
            'Laplacian': Laplacian
        }
        return render(request, "laplacian.html", context)
    return render(request, "laplacian.html")
def log(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # 1. Convert BGR to RGB (the kernel below is applied to the green channel)
        grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # 2. Pad the borders, then smooth the image with a Gaussian filter
        img = cv2.copyMakeBorder(grayImage, 2, 2, 2, 2, borderType=cv2.BORDER_REPLICATE)
        img = cv2.GaussianBlur(img, (3, 3), 0, 0)
        # 3. Define the LoG kernel with numpy
        m1 = np.array(
            [[0, 0, -1, 0, 0], [0, -1, -2, -1, 0], [-1, -2, 16, -2, -1], [0, -1, -2, -1, 0], [0, 0, -1, 0, 0]],
            dtype=np.int32)
        # 4. Convolve
        # To evaluate the kernel at every pixel, its center must line up with each
        # original pixel; since the borders were padded by 2 px, iterate from 2 to rows(cols) - 2.
        image1 = np.zeros(img.shape).astype(np.int32)
        h, w, _ = img.shape
        for i in range(2, h - 2):
            for j in range(2, w - 2):
                image1[i, j] = np.sum(m1 * img[i - 2:i + 3, j - 2:j + 3, 1])
        # 5. Convert back to uint8
        image1 = cv2.convertScaleAbs(image1)
        image1 = cv2.imencode('.png', image1)[1].tobytes()
        image1 = base64.b64encode(image1)
        image1 = image1.decode()
        context = {
            'image1': image1
        }
        return render(request, "log.html", context)
    return render(request, "log.html")
def canny(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # 1. Gaussian blur
        img = cv2.GaussianBlur(img, (3, 3), 0)
        # 2. Convert to grayscale
        grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # 3. Sobel gradients in the x and y directions
        gradx = cv2.Sobel(grayImage, cv2.CV_16SC1, 1, 0)
        grady = cv2.Sobel(grayImage, cv2.CV_16SC1, 0, 1)
        # 4. Run Canny on the precomputed gradients from step 3; low threshold 50, high threshold 150
        edge_output = cv2.Canny(gradx, grady, 50, 150)
        edge_output = cv2.imencode('.png', edge_output)[1].tobytes()
        edge_output = base64.b64encode(edge_output)
        edge_output = edge_output.decode()
        context = {
            'edge_output': edge_output
        }
        return render(request, "canny.html", context)
    return render(request, "canny.html")
def hough(request):
    if request.method == "POST":
        img = cv2.imread("firstapp/static/img/a.png", cv2.IMREAD_UNCHANGED)
        img = cv2.GaussianBlur(img, (3, 3), 0)
        edges = cv2.Canny(img, 50, 150, apertureSize=3)
        # Run HoughLines with
        # rho = 1
        # theta = np.pi/2
        # threshold = 118
        # and defaults for the rest
        ######### Begin #########
        lines = cv2.HoughLines(edges, 1, np.pi / 2, 118)
        ######### end ##########
        result = img.copy()
        for i_line in lines:
            for line in i_line:
                rho = line[0]
                theta = line[1]
                if (theta < (np.pi / 4.)) or (theta > (3. * np.pi / 4.0)):  # vertical line
                    pt1 = (int(rho / np.cos(theta)), 0)
                    pt2 = (int((rho - result.shape[0] * np.sin(theta)) / np.cos(theta)), result.shape[0])
                    cv2.line(result, pt1, pt2, (0, 0, 255))
                else:
                    pt1 = (0, int(rho / np.sin(theta)))
                    pt2 = (result.shape[1], int((rho - result.shape[1] * np.cos(theta)) / np.sin(theta)))
                    cv2.line(result, pt1, pt2, (0, 0, 255), 1)
        minLineLength = 200
        maxLineGap = 15
        # Run HoughLinesP with
        # rho = 1
        # theta = np.pi/180
        # threshold = 80
        # and the minLineLength and maxLineGap defined above
        # (passed as keywords: positionally they would land in the unused `lines` slot)
        ######### Begin #########
        linesP = cv2.HoughLinesP(edges, 1, np.pi / 180, 80, minLineLength=minLineLength, maxLineGap=maxLineGap)
        ######### end ##########
        result_P = img.copy()
        for i_P in linesP:
            for x1, y1, x2, y2 in i_P:
                cv2.line(result_P, (x1, y1), (x2, y2), (0, 255, 0), 3)
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        result_P = cv2.imencode('.png', result_P)[1].tobytes()
        result_P = base64.b64encode(result_P)
        result_P = result_P.decode()
        context = {
            'result': result,
            'result_P': result_P
        }
        return render(request, "hough.html", context)
    else:
        return render(request, "hough.html")
def morphology(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        # 1. Binarize the input image
        im_bin = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        _, img = cv2.threshold(im_bin, 127, 255, cv2.THRESH_BINARY)
        # 2. Define a cross-shaped structuring element
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (10, 10), (-1, -1))
        # 3. Apply opening and closing to the binary image
        im_op = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        im_cl = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        im_op = cv2.imencode('.png', im_op)[1].tobytes()
        im_op = base64.b64encode(im_op)
        im_op = im_op.decode()
        im_cl = cv2.imencode('.png', im_cl)[1].tobytes()
        im_cl = base64.b64encode(im_cl)
        im_cl = im_cl.decode()
        context = {
            'im_op': im_op,
            'im_cl': im_cl
        }
        return render(request, "morphology.html", context)
    return render(request, "morphology.html")
def erode(request):
    if request.method == "POST":
        img = cv2.imread("firstapp/static/img/e_rode_yuan.png", 0)
        # Erode the binary image with a 10x10 cross-shaped structuring element anchored at its center
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (10, 10))
        erosion = cv2.erode(img, kernel)
        erosion = cv2.imencode('.png', erosion)[1].tobytes()
        erosion = base64.b64encode(erosion)
        erosion = erosion.decode()
        context = {
            'erosion': erosion
        }
        return render(request, "erode.html", context)
    else:
        return render(request, "erode.html")
def swell(request):
    if request.method == "POST":
        img = cv2.imread("firstapp/static/img/e_rode_yuan.png", 0)
        # Cross-shaped structuring element
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
        # Dilate
        dilation = cv2.dilate(img, kernel)
        dilation = cv2.imencode('.png', dilation)[1].tobytes()
        dilation = base64.b64encode(dilation)
        dilation = dilation.decode()
        context = {
            'dilation': dilation
        }
        return render(request, "swell.html", context)
    else:
        return render(request, "swell.html")
def open(request):
    if request.method == "POST":
        img = cv2.imread("firstapp/static/img/e_rode_yuan.png", 0)
        # Cross-shaped structuring element
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
        # Opening
        open = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        open = cv2.imencode('.png', open)[1].tobytes()
        open = base64.b64encode(open)
        open = open.decode()
        context = {
            'open': open
        }
        return render(request, "open.html", context)
    else:
        return render(request, "open.html")
def close(request):
    if request.method == "POST":
        img = cv2.imread("firstapp/static/img/e_rode_yuan.png", 0)
        # Cross-shaped structuring element
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (10, 10))
        # Closing
        close = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        close = cv2.imencode('.png', close)[1].tobytes()
        close = base64.b64encode(close)
        close = close.decode()
        context = {
            'close': close
        }
        return render(request, "close.html", context)
    else:
        return render(request, "close.html")
def noise(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        h = img.shape[0]
        w = img.shape[1]
        # Convert the color image to grayscale
        grayimage = np.zeros((h, w), np.uint8)
        for i in range(h):
            for j in range(w):
                grayimage[i, j] = 0.11 * img[i, j, 0] + 0.59 * img[i, j, 1] + 0.3 * img[i, j, 2]  # OpenCV stores pixels as B, G, R
        # Add salt-and-pepper noise
        noiseimage = grayimage.copy()
        SNR = 0.95  # signal-to-noise ratio
        pixels = h * w  # number of pixels in the image
        noise_num = int(pixels * (1 - SNR))  # number of salt-and-pepper pixels to add
        for i in range(noise_num):
            randx = np.random.randint(1, h - 1)  # random integer in [1, h-1)
            randy = np.random.randint(1, w - 1)  # random integer in [1, w-1)
            if np.random.random() <= 0.5:  # np.random.random() returns a float in [0, 1)
                noiseimage[randx, randy] = 0
            else:
                noiseimage[randx, randy] = 255
        grayimage = cv2.imencode('.png', grayimage)[1].tobytes()
        grayimage = base64.b64encode(grayimage)
        grayimage = grayimage.decode()
        noiseimage = cv2.imencode('.png', noiseimage)[1].tobytes()
        noiseimage = base64.b64encode(noiseimage)
        noiseimage = noiseimage.decode()
        context = {
            'grayimage': grayimage,
            'noiseimage': noiseimage
        }
        return render(request, "noise.html", context)
    return render(request, "noise.html")
def mean_filter(request):
    if request.method == "POST":
        img = cv2.imread("firstapp/static/img/image (7).png", cv2.IMREAD_GRAYSCALE)
        # Output image
        output = np.zeros(img.shape, np.uint8)
        # Traverse the image and filter each pixel
        for i in range(img.shape[0]):
            for j in range(img.shape[1]):
                # Running product of the pixel values inside the window
                ij = 1
                # Traverse the 1x3 horizontal window around the pixel
                for m in range(-1, 2):
                    # Stay inside the image
                    if 0 <= j + m < img.shape[1]:
                        ij *= img[i][j + m]
                # Take the cube root of the product (the geometric mean) as the output pixel
                output[i][j] = ij ** (1 / 3)
        output = cv2.imencode('.png', output)[1].tobytes()
        output = base64.b64encode(output)
        output = output.decode()
        context = {
            'output': output
        }
        return render(request, "mean_filter.html", context)
    else:
        return render(request, "mean_filter.html")
def sort_filter(request):
    if request.method == "POST":
        img = cv2.imread("firstapp/static/img/image (7).png", cv2.IMREAD_GRAYSCALE)
        # Output image
        output = np.zeros(img.shape, np.uint8)
        # Return the largest value in a list
        def get_max(array):
            # Length of the list
            length = len(array)
            # Selection-sort the list into descending order
            for i in range(length):
                for j in range(i + 1, length):
                    # Pick the larger value
                    if array[j] > array[i]:
                        # Swap
                        temp = array[j]
                        array[j] = array[i]
                        array[i] = temp
            return array[0]
        ######### Begin #########
        array = []
        for i in range(img.shape[0]):
            for j in range(img.shape[1]):
                # Empty the window buffer
                array.clear()
                # Traverse the 3x3 window
                for m in range(-1, 2):
                    for n in range(-1, 2):
                        # Stay inside the image
                        if 0 <= i + m < img.shape[0] and 0 <= j + n < img.shape[1]:  # collect the pixel values
                            array.append(img[i + m][j + n])
                # The maximum becomes the output pixel
                output[i][j] = get_max(array)
        output = cv2.imencode('.png', output)[1].tobytes()
        output = base64.b64encode(output)
        output = output.decode()
        context = {
            'output': output
        }
        return render(request, "sort_filter.html", context)
    else:
        return render(request, "sort_filter.html")
def choose_filter(request):
    if request.method == "POST":
        img = cv2.imread("firstapp/static/img/img.png", cv2.IMREAD_GRAYSCALE)
        # Output image
        output = np.zeros(img.shape, np.uint8)
        # Traverse the image, keeping only pixels above the threshold
        array = []
        # Threshold of the pass band
        max = 200
        for i in range(img.shape[0]):
            for j in range(img.shape[1]):
                # (unused leftover window buffer from the sort filter)
                array.clear()
                if img[i][j] > max:
                    output[i][j] = img[i][j]
                else:
                    output[i][j] = 0
        output = cv2.imencode('.png', output)[1].tobytes()
        output = base64.b64encode(output)
        output = output.decode()
        context = {
            'output': output
        }
        return render(request, "choose_filter.html", context)
    else:
        return render(request, "choose_filter.html")
def blank_smooth(request):
    if request.method == "POST":
        img = cv2.imread("firstapp/static/img/noiseimage.png", 0)
        h = img.shape[0]
        w = img.shape[1]
        # Mean filtering
        img_Blur_3 = cv2.blur(img, (3, 3))  # 3*3 mean filter
        img_Blur_5 = cv2.blur(img, (5, 5))  # 5*5 mean filter
        # Median filtering
        img_MedianBlur_3 = cv2.medianBlur(img, 3)  # 3*3 median filter
        img_MedianBlur_5 = cv2.medianBlur(img, 5)  # 5*5 median filter
        img_Blur_3 = cv2.imencode('.png', img_Blur_3)[1].tobytes()
        img_Blur_3 = base64.b64encode(img_Blur_3)
        img_Blur_3 = img_Blur_3.decode()
        img_Blur_5 = cv2.imencode('.png', img_Blur_5)[1].tobytes()
        img_Blur_5 = base64.b64encode(img_Blur_5)
        img_Blur_5 = img_Blur_5.decode()
        img_MedianBlur_3 = cv2.imencode('.png', img_MedianBlur_3)[1].tobytes()
        img_MedianBlur_3 = base64.b64encode(img_MedianBlur_3)
        img_MedianBlur_3 = img_MedianBlur_3.decode()
        img_MedianBlur_5 = cv2.imencode('.png', img_MedianBlur_5)[1].tobytes()
        img_MedianBlur_5 = base64.b64encode(img_MedianBlur_5)
        img_MedianBlur_5 = img_MedianBlur_5.decode()
        context = {
            'img_Blur_3': img_Blur_3,
            'img_Blur_5': img_Blur_5,
            'img_MedianBlur_3': img_MedianBlur_3,
            'img_MedianBlur_5': img_MedianBlur_5,
        }
        return render(request, "blank_smooth.html", context)
    else:
        return render(request, "blank_smooth.html")
def low_filter(request):
    """
    def filter(img, D0, N=2, type='lp', filter='butterworth'):
        '''
        Frequency-domain filter
        Args:
            img: grayscale image
            D0: cutoff frequency
            N: order of the Butterworth filter (defaults to 2)
            type: lp - low-pass, hp - high-pass
            filter: butterworth, ideal or gaussian
        Returns:
            img_back: the filtered image
        '''
        # Discrete Fourier transform
        dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
        # Shift the zero-frequency component to the center
        dtf_shift = np.fft.fftshift(dft)
        rows, cols = img.shape
        crow, ccol = int(rows / 2), int(cols / 2)  # center of the spectrum
        mask = np.zeros((rows, cols, 2))  # rows x cols x 2 matrix
        for i in range(rows):
            for j in range(cols):
                D = np.sqrt((i - crow) ** 2 + (j - ccol) ** 2)  # compute D(u, v)
                if (filter.lower() == 'butterworth'):  # Butterworth filter
                    if (type == 'lp'):
                        mask[i, j] = 1 / (1 + (D / D0) ** (2 * N))
                    elif (type == 'hp'):
                        mask[i, j] = 1 / (1 + (D0 / D) ** (2 * N))
                    else:
                        assert ('type error')
                elif (filter.lower() == 'ideal'):  # ideal filter
                    if (type == 'lp'):
                        if (D <= D0):
                            mask[i, j] = 1
                    elif (type == 'hp'):
                        if (D > D0):
                            mask[i, j] = 1
                    else:
                        assert ('type error')
                elif (filter.lower() == 'gaussian'):  # Gaussian filter
                    if (type == 'lp'):
                        mask[i, j] = np.exp(-(D * D) / (2 * D0 * D0))
                    elif (type == 'hp'):
                        mask[i, j] = (1 - np.exp(-(D * D) / (2 * D0 * D0)))
                    else:
                        assert ('type error')
        fshift = dtf_shift * mask
        f_ishift = np.fft.ifftshift(fshift)
        img_back = cv2.idft(f_ishift)
        img_back = cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])  # magnitude of the complex result
        img_back = np.abs(img_back)
        # img_back = (img_back - np.amin(img_back)) / (np.amax(img_back) - np.amin(img_back))
        return img_back
    img = cv2.imread('E:/imageProcess/firstapp/static/img/a.png', 0)
    # Low-pass filters
    plt.subplot(221), plt.imshow(img, cmap='gray'), plt.title('Input')
    plt.xticks([]), plt.yticks([])
    img_back1 = filter(img, 30, type='lp', filter='ideal')  # cutoff frequency 30
    plt.subplot(222), plt.imshow(img_back1, cmap='gray'), plt.title('Output_ideal')
    plt.xticks([]), plt.yticks([])
    img_back2 = filter(img, 30, type='lp', filter='butterworth')
    plt.subplot(223), plt.imshow(img_back2, cmap='gray'), plt.title('Output_butterworth')
    plt.xticks([]), plt.yticks([])
    img_back3 = filter(img, 30, type='lp', filter='gaussian')
    plt.subplot(224), plt.imshow(img_back3, cmap='gray'), plt.title('Output_gaussian')
    plt.xticks([]), plt.yticks([])
    plt.savefig("./test.png")
    plt.show()
    :param request:
    :return:
    """
    if request.method == "POST":
        result = cv2.imread('test.png', 0)
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, 'low_filter.html', {'result': result})
    else:
        return render(request, "low_filter.html")
def high_filter(request):
    """
    def filter(img, D0, N=2, type='lp', filter='butterworth'):
        '''
        Frequency-domain filter
        Args:
            img: grayscale image
            D0: cutoff frequency
            N: order of the Butterworth filter (defaults to 2)
            type: lp - low-pass, hp - high-pass
            filter: butterworth, ideal or gaussian
        Returns:
            img_back: the filtered image
        '''
        # Discrete Fourier transform
        dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
        # Shift the zero-frequency component to the center
        dtf_shift = np.fft.fftshift(dft)
        rows, cols = img.shape
        crow, ccol = int(rows / 2), int(cols / 2)  # center of the spectrum
        mask = np.zeros((rows, cols, 2))  # rows x cols x 2 matrix
        for i in range(rows):
            for j in range(cols):
                D = np.sqrt((i - crow) ** 2 + (j - ccol) ** 2)  # compute D(u, v)
                if (filter.lower() == 'butterworth'):  # Butterworth filter
                    if (type == 'lp'):
                        mask[i, j] = 1 / (1 + (D / D0) ** (2 * N))
                    elif (type == 'hp'):
                        mask[i, j] = 1 / (1 + (D0 / D) ** (2 * N))
                    else:
                        assert ('type error')
                elif (filter.lower() == 'ideal'):  # ideal filter
                    if (type == 'lp'):
                        if (D <= D0):
                            mask[i, j] = 1
                    elif (type == 'hp'):
                        if (D > D0):
                            mask[i, j] = 1
                    else:
                        assert ('type error')
                elif (filter.lower() == 'gaussian'):  # Gaussian filter
                    if (type == 'lp'):
                        mask[i, j] = np.exp(-(D * D) / (2 * D0 * D0))
                    elif (type == 'hp'):
                        mask[i, j] = (1 - np.exp(-(D * D) / (2 * D0 * D0)))
                    else:
                        assert ('type error')
        fshift = dtf_shift * mask
        f_ishift = np.fft.ifftshift(fshift)
        img_back = cv2.idft(f_ishift)
        img_back = cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])  # magnitude of the complex result
        img_back = np.abs(img_back)
        # img_back = (img_back - np.amin(img_back)) / (np.amax(img_back) - np.amin(img_back))
        return img_back
    img = cv2.imread('E:/imageProcess/firstapp/static/img/a.png', 0)
    # Low-pass filters
    plt.subplot(221), plt.imshow(img, cmap='gray'), plt.title('Input')
    plt.xticks([]), plt.yticks([])
    img_back1 = filter(img, 30, type='lp', filter='ideal')  # cutoff frequency 30
    plt.subplot(222), plt.imshow(img_back1, cmap='gray'), plt.title('Output_ideal')
    plt.xticks([]), plt.yticks([])
    img_back2 = filter(img, 30, type='lp', filter='butterworth')
    plt.subplot(223), plt.imshow(img_back2, cmap='gray'), plt.title('Output_butterworth')
    plt.xticks([]), plt.yticks([])
    img_back3 = filter(img, 30, type='lp', filter='gaussian')
    plt.subplot(224), plt.imshow(img_back3, cmap='gray'), plt.title('Output_gaussian')
    plt.xticks([]), plt.yticks([])
    plt.savefig("./test.png")
    plt.show()
    :param request:
    :return:
    """
    if request.method == "POST":
        result = cv2.imread('test1.png', 0)
        result = cv2.imencode('.png', result)[1].tobytes()
        result = base64.b64encode(result)
        result = result.decode()
        return render(request, 'high_filter.html', {'result': result})
    else:
        return render(request, "high_filter.html")
def blank_sharp(request):
    # # Read the image
    # image = cv2.imread('E:/imageProcess/firstapp/static/img/a.png')
    # lena = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    #
    # # Convert to grayscale
    # grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #
    # # Roberts operator
    # kernelx = np.array([[-1, 0], [0, 1]], dtype=int)
    # kernely = np.array([[0, -1], [1, 0]], dtype=int)
    # x = cv2.filter2D(grayImage, cv2.CV_16S, kernelx)
    # y = cv2.filter2D(grayImage, cv2.CV_16S, kernely)
    #
    # # Convert to uint8
    # absX = cv2.convertScaleAbs(x)
    # absY = cv2.convertScaleAbs(y)
    #
    # # Weighted sum
    # Roberts = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
    #
    # # Allow CJK labels in matplotlib
    # plt.rcParams['font.sans-serif'] = ['SimHei']
    #
    # # Show the images
    # titles = [u'Original image', u'Roberts image']
    # images = [lena, Roberts]
    #
    # for i in range(2):
    #     plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
    #     plt.xticks([]), plt.yticks([])
    #     plt.title(titles[i])
    # plt.savefig("./a.png")
    # plt.show()
    #
    # lena = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    #
    # # Convert to grayscale
    # grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #
    # # Prewitt kernels
    # kernelX = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]], dtype=int)
    # kernelY = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], dtype=int)
    # x = cv2.filter2D(grayImage, cv2.CV_16S, kernelX)
    # y = cv2.filter2D(grayImage, cv2.CV_16S, kernelY)
    #
    # # Convert to uint8
    # absX = cv2.convertScaleAbs(x)
    # absY = cv2.convertScaleAbs(y)
    #
    # # Weighted sum
    # Prewitt = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
    #
    # # Show the images
    # plt.rcParams['font.sans-serif'] = ['SimHei']
    # titles = [u'Original image', u'Prewitt image']
    # images = [lena, Prewitt]
    #
    # for i in range(2):
    #     plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
    #     plt.xticks([]), plt.yticks([])
    #     plt.title(titles[i])
    # plt.savefig("./b.png")
    # plt.show()
    #
    # lean = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    #
    # # Convert to grayscale
    # grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #
    # # Sobel operator
    # x = cv2.Sobel(grayImage, cv2.CV_16S, 1, 0)  # first derivative in x
    # y = cv2.Sobel(grayImage, cv2.CV_16S, 0, 1)  # first derivative in y
    # absX = cv2.convertScaleAbs(x)
    # absY = cv2.convertScaleAbs(y)
    # Sobel = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
    #
    # # Show the images
    # plt.rcParams['font.sans-serif'] = ['SimHei']
    # titles = [u'Original image', u'Sobel image']
    # images = [lean, Sobel]
    #
    # for i in range(2):
    #     plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
    #     plt.xticks([]), plt.yticks([])
    #     plt.title(titles[i])
    # plt.savefig("./c.png")
    # plt.show()
    #
    # lean = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    #
    # # Convert to grayscale
    # grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #
    # # Laplacian operator
    # dst = cv2.Laplacian(grayImage, cv2.CV_16S, ksize=3)
    # Laplacian = cv2.convertScaleAbs(dst)
    #
    # # Show the images
    # plt.rcParams['font.sans-serif'] = ['SimHei']
    # titles = [u'Original image', u'Laplacian image']
    # images = [lean, Laplacian]
    #
    # for i in range(2):
    #     plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
    #     plt.xticks([]), plt.yticks([])
    #     plt.title(titles[i])
    # plt.savefig("./d.png")
    # plt.show()
    if request.method == "POST":
        a = cv2.imread('a.png', 0)
        a = cv2.imencode('.png', a)[1].tobytes()
        a = base64.b64encode(a)
        a = a.decode()
        b = cv2.imread('b.png', 0)
        b = cv2.imencode('.png', b)[1].tobytes()
        b = base64.b64encode(b)
        b = b.decode()
        c = cv2.imread('c.png', 0)
        c = cv2.imencode('.png', c)[1].tobytes()
        c = base64.b64encode(c)
        c = c.decode()
        d = cv2.imread('d.png', 0)
        d = cv2.imencode('.png', d)[1].tobytes()
        d = base64.b64encode(d)
        d = d.decode()
        context = {
            'a': a,
            'b': b,
            'c': c,
            'd': d
        }
        return render(request, "blank_sharp.html", context)
    else:
        return render(request, "blank_sharp.html")
def draw_gray(request):
    # img = cv2.imread("E:/imageProcess/firstapp/static/img/a.png")
    # # Plot the histograms of the image's r, g and b channels
    # # using OpenCV's calcHist;
    # # each curve is drawn in its channel's color
    # # and the x axis is limited to 0 ~ 256
    # ########## Begin ##########
    # color = ['r', 'g', 'b']
    # histr = cv2.calcHist([img], [0], None, [256], [0, 256])
    # plt.plot(histr, color='r')
    # plt.xlim([0, 256])
    # histr = cv2.calcHist([img], [1], None, [256], [0, 256])
    # plt.plot(histr, color='g')
    # plt.xlim([0, 256])
    # histr = cv2.calcHist([img], [2], None, [256], [0, 256])
    # plt.plot(histr, color='b')
    # plt.xlim([0, 256])
    # plt.savefig("./g.png")
    # return render(request, "draw_gray.html")
    if request.method == "POST":
        img = cv2.imread("g.png", cv2.IMREAD_UNCHANGED)
        g = cv2.imencode('.png', img)[1].tobytes()
        g = base64.b64encode(g)
        g = g.decode()
        return render(request, "draw_gray.html", {'g': g})
    return render(request, "draw_gray.html")
def diff_gray(request):
    # # Plot a grayscale image next to its histogram
    # def grayHist(img, filename):
    #     plt.figure(filename, figsize=(16, 8))
    #     # Show the input image
    #     plt.subplot(121)
    #     plt.imshow(img, 'gray')
    #     # Show the histogram
    #     plt.subplot(122)
    #     h, w = img.shape[:2]
    #
    #     ########## Begin ##########
    #     """
    #     Task 1. reshape the 2-D image matrix into a 1-D array named pixelSequence
    #     """
    #     pixelSequence = img.ravel()
    #
    #     ########## End ##########
    #     numberBins = 256
    #     ########## Begin ##########
    #     """
    #     Task 2. call hist() to draw the histogram; it returns histogram, bins, patch
    #     """
    #     histogram, bins, patch = plt.hist(pixelSequence, numberBins)
    #
    #     ########## End ##########
    #     print(max(histogram))
    #     plt.xlabel("gray label")
    #     plt.ylabel("number of pixels")
    #     plt.axis([0, 255, 0, np.max(histogram)])
    #     # Save the figure (the peak count is printed above)
    #     plt.savefig(filename)
    #     plt.show()
    #
    #
    # # Paths of the image data
    # img_path = 'E:/imageProcess/firstapp/static/img/a.png'
    # out_path = './h.png'
    # out2_path = './i.png'
    #
    # img = cv2.imread(img_path, 0)
    # h, w = img.shape[:2]
    # out = np.zeros(img.shape, np.uint8)
    #
    # ########## Begin ##########
    # """
    # Task 3. apply a piecewise linear transform over the pixel ranges below
    # (three segments, mainly an exercise in for loops):
    # # y = 0.5*x           (x < 50)
    # # y = 3.6*x - 310     (50 <= x < 150)
    # # y = 0.238*x + 194   (x >= 150)
    # """
    # for i in range(h):
    #     for j in range(w):
    #         pix = img[i][j]
    #         if pix < 50:
    #             out[i][j] = 0.5 * pix
    #         elif pix < 150:
    #             out[i][j] = 3.6 * pix - 310
    #         else:
    #             out[i][j] = 0.238 * pix + 194
    #
    # ########## End ##########
    # out = np.around(out)
    # out = out.astype(np.uint8)
    # grayHist(img, out_path)
    # grayHist(out, out2_path)
    # return render(request, "diff_gray.html")
    if request.method == "POST":
        img1 = cv2.imread("h.png", cv2.IMREAD_UNCHANGED)
        img2 = cv2.imread("i.png", cv2.IMREAD_UNCHANGED)
        h = cv2.imencode('.png', img1)[1].tobytes()
        h = base64.b64encode(h)
        h = h.decode()
        i = cv2.imencode('.png', img2)[1].tobytes()
        i = base64.b64encode(i)
        i = i.decode()
        context = {
            'h': h,
            'i': i
        }
        return render(request, "diff_gray.html", context)
    return render(request, "diff_gray.html")
def color_histogram(request):
    # # Histogram helper: img is the input image, img_plt is the output file for the color histogram
    # def histCover(img, fileName):
    #     color = ["r", "g", "b"]
    #     # Show the original image
    #     ########## Begin ##########
    #     """
    #     Task 1. show the original image; cv2 reads BGR while matplotlib expects RGB,
    #     so the channels have to be reordered first
    #     """
    #     b, g, r = cv2.split(img)
    #     img_change = cv2.merge([r, g, b])
    #     plt.imshow(img_change)
    #
    #     ########## End ##########
    #     plt.subplot(121)
    #     plt.imshow(img)
    #     plt.subplot(122)
    #     # Draw the color histogram: iterate over the channels and find each one's range
    #     for index, c in enumerate(color):
    #         ########## Begin ##########
    #         """
    #         Task 2. compute and plot each channel's histogram with cv2.calcHist; the return value is hist
    #         """
    #         hist = cv2.calcHist([img_change], [index], None, [256], [0, 255])
    #
    #         ########## End ##########
    #         print('max:', max(hist))
    #         print('min:', min(hist))
    #         plt.plot(hist, color=c)
    #         plt.xlim([0, 255])
    #     plt.savefig(fileName)
    #     plt.show()
    #
    # # Main part: define the image path
    #
    # img_path = 'E:/imageProcess/firstapp/static/img/a.png'
    # img_plt = './j.png'
    #
    # ########## Begin ##########
    # """
    # Task 3. load the image from the given path img_path; the return value is imgOri1
    # """
    # imgOri1 = cv2.imread(img_path)
    #
    # ########## End ##########
    # histCover(imgOri1, img_plt)
    # return render(request, "color_histogram.html")
    if request.method == "POST":
        img = cv2.imread("j.png", cv2.IMREAD_UNCHANGED)
        j = cv2.imencode('.png', img)[1].tobytes()
        j = base64.b64encode(j)
        j = j.decode()
        return render(request, "color_histogram.html", {'j': j})
    return render(request, "color_histogram.html")
def small_wave(request):
    image = request.FILES.get('img')
    if image:
        img = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        img = cv2.resize(img, (448, 448))
        # Convert the multi-channel image to a single channel
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.float32)
        plt.figure('Single-level 2-D wavelet transform')
        coeffs = dwt2(img, 'haar')
        cA, (cH, cV, cD) = coeffs
        plt.subplot(221), plt.imshow(cA, 'gray'), plt.title("A")
        plt.subplot(222), plt.imshow(cH, 'gray'), plt.title("H")
        plt.subplot(223), plt.imshow(cV, 'gray'), plt.title("V")
        plt.subplot(224), plt.imshow(cD, 'gray'), plt.title("D")
        plt.savefig("./k.png")
        plt.show()
        j = cv2.imread("k.png", cv2.IMREAD_UNCHANGED)
        j = cv2.imencode('.png', j)[1].tobytes()
        j = base64.b64encode(j)
        j = j.decode()
        return render(request, "small_wave.html", {'j': j})
    return render(request, "small_wave.html")