import numpy as np
import lqmtest_x3_5_pool
import lqmtest_x3_2_load_data
import lqmtest_x3_4_conv_proc
import lqmtest_x3_6_fullconn
import lqmtest_x3_7_nonlinear
import lqmtest_x3_8_classify
import lqmtest_x3_9_label
import lqmtest_x3_10_error
class ModelObj:  # network object
    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        self.ObjID = ObjID  # element ID
        self.ObjType = ObjType  # element type
        self.ObjLable = ObjLable  # object label
        self.ParaString = ParaString  # parameter string
        self.ObjX = ObjX  # x coordinate of the object
        self.ObjY = ObjY  # y coordinate of the object
class AjConv_Class(ModelObj):  # convolution-adjustment object
    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.AjConvProc = self.ajconv_proc  # core operation function
        self.SetAjConvPara = self.setajconv_para  # parameter-setting function

    def ajconv_proc(self, images, AjConvPara):
        kernel_grad_list = []
        bias_grad = 0  # initialize the bias gradient to zero
        for c in images:
            # compute the gradients of the convolution kernel and the bias;
            # initialize the kernel gradient as a zero matrix
            kernel_grad = np.zeros_like(AjConvPara['kernel_info'])
            for i in range(AjConvPara['loss'].shape[0]):
                for j in range(AjConvPara['loss'].shape[1]):
                    # rotate the matching patch of the input data by 180 degrees,
                    # multiply it by the error value, and accumulate it into the kernel gradient
                    kernel_grad += np.rot90(c[i:i + kernel_grad.shape[0],
                                              j:j + kernel_grad.shape[1]],
                                            2) * AjConvPara['loss'][i, j]
                    # accumulate the error value into the bias gradient
                    bias_grad += AjConvPara['loss'][i, j]
            kernel_grad_list.append(kernel_grad)
        # stack the per-image kernel gradients along axis 0
        result = np.stack(kernel_grad_list, axis=0)
        # sum along axis 0 and average over the batch
        kernel_grad = np.sum(result, axis=0) / len(images)
        # update the convolution kernel (the bias gradient is computed but not returned here)
        kernel = AjConvPara['kernel_info'] - AjConvPara[
            'learning_rate'] * kernel_grad  # kernel minus learning rate times kernel gradient
        return kernel  # return the updated kernel

    def setajconv_para(self, loss, ConvPara):
        kernel = ConvPara['kernel']  # convolution kernel information
        learning_rate = float(input("Enter the learning rate for the convolution adjustment: "))  # e.g. 0.01
        loss = np.array([[loss]])
        AjConvPara = {'kernel_info': kernel, 'learning_rate': learning_rate,
                      'loss': loss}
        return AjConvPara
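
# Illustrative sketch (not used by the pipeline below): the per-channel kernel-gradient
# rule that ajconv_proc applies, shown for a single 2-D input channel. The helper name
# _kernel_grad_sketch and its arguments are hypothetical and added only for clarity; it
# assumes `loss` already has the small matrix shape that setajconv_para produces.
def _kernel_grad_sketch(channel, loss, kernel_shape):
    grad = np.zeros(kernel_shape)
    for i in range(loss.shape[0]):
        for j in range(loss.shape[1]):
            # accumulate rot180(input patch) * error value, as in ajconv_proc
            patch = channel[i:i + kernel_shape[0], j:j + kernel_shape[1]]
            grad += np.rot90(patch, 2) * loss[i, j]
    return grad
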
if __name__ == '__main__':
    DataSet = lqmtest_x3_2_load_data.Data_Class("DataSet1", 1, "Dataset 1", [], 120, 330)
    # setload_data() gets the parameters for loading the dataset
    DataPara = DataSet.SetDataPara()
    train_images, test_images = DataSet.LoadData(DataPara)
    Conv = lqmtest_x3_4_conv_proc.Conv_Class("Conv1", 2, "Convolution 1", [], 250, 330)
    ConvPara = Conv.SetConvPara()
    for i in range(len(train_images) // 32):
        images = train_images[i * 32:(i + 1) * 32]
        conv_images = []  # list for the images after convolution
        for image in images:  # iterate over the training images
            dim = len(image.shape)  # number of dimensions of the matrix
            if dim == 2:  # if it is a 2-D matrix, convert it to 3-D
                image_h, image_w = image.shape
                image = np.reshape(image, (1, image_h, image_w))
                # call ConvProc() to perform the convolution according to ConvPara
                output = Conv.ConvProc(image, ConvPara)
                conv_images.append(output)  # store the convolution result
            elif dim == 3:  # if it is already 3-D, convolve it directly
                output = Conv.ConvProc(image, ConvPara)
                conv_images.append(output)
        # convert the list of convolved images to an array for later processing
        conv_images = np.array(conv_images)
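        # At this point conv_images holds one convolved feature map per image in the
        # 32-image batch; its exact shape depends on the kernel settings in ConvPara.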
        Pool = lqmtest_x3_5_pool.Pool_Class("Pool1", 3, "Max Pooling 1", [], 380, 330)
        PoolPara = Pool.SetPollPara()
        pool_images = []  # list for the images after pooling
        for image in conv_images:  # iterate over the convolved images
            output = Pool.MaxPoolProc(image, PoolPara)
            pool_images.append(output)  # store the pooling result
        # convert the list of pooled images to an array for later processing
        pool_images = np.array(pool_images)
        _, _, poolH, poolW = pool_images.shape
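        # pool_images is unpacked as a 4-D array (batch, channels, poolH, poolW); poolH
        # and poolW are passed on so the fully connected layer can size its weights.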
        FullConn = lqmtest_x3_6_fullconn.FullConn_Class("FullConn1", 4, "Fully Connected 1", [], 510, 330)
        FullConnPara = FullConn.SetFullConnPara(poolH, poolW)
        fullconn_images = []  # list for the outputs of the fully connected layer
        for image in pool_images:  # iterate over the pooled images
            output = FullConn.FullConnProc(image, FullConnPara)
            fullconn_images.append(output)  # store the fully connected result
        # convert the list of fully connected outputs to an array for later processing
        fullconn_images = np.array(fullconn_images)
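        # Each fully connected output is a feature vector; stacking them gives one array
        # for the whole batch before the nonlinear activation is applied.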
        Nonline = lqmtest_x3_7_nonlinear.Nonline_Class("Nonline1", 5, "Nonlinear Function 1", [], 640, 330)
        NonLPara = Nonline.SetNonLPara()
        # list for the outputs of the nonlinear activation
        nonlinear_images = []
        for image in fullconn_images:  # iterate over the fully connected outputs
            output = Nonline.NonlinearProc(image, NonLPara)
            # store the nonlinear result
            nonlinear_images.append(output)
        # convert the list of nonlinear outputs to an array for later processing
        nonlinear_images = np.array(nonlinear_images)
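        # The activation configured through SetNonLPara has now been applied to every
        # fully connected output; these activated vectors feed the classifier next.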
        Classifier = lqmtest_x3_8_classify.Classifier_Class("Classifier1", 6, "Classification 1", [], 780, 330)
        ClassifyPara = Classifier.SetClassifyPara()
        classifier_images = []  # list for the classification results
        prob_images = []  # list for the probability vectors from the classifier
        for image in nonlinear_images:  # iterate over the nonlinear outputs
            # call softmax to obtain the probability distribution vector
            prob = Classifier.softmax(image)
            prob_images.append(prob)  # store the probability vector
            output = Classifier.ClassifierProc(image, ClassifyPara)
            classifier_images.append(output)  # store the classification result
        # convert the classification results to an array for later processing
        classifier_images = np.array(classifier_images)
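        # prob_images keeps the softmax probability vectors for the error computation
        # below, while classifier_images keeps the discrete classification results.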
        LabelPara = lqmtest_x3_9_label.Label()
        label_dict = LabelPara.setlabel_para()
        right_label = LabelPara.label_array(i)
        labeled_samples = LabelPara.label_proc(images,
                                               right_label, label_dict)
        Error = lqmtest_x3_10_error.Error_Class("Error1", 7, "Error Calculation 1", [], 710, 124)
        ErrorPara = Error.SetErrorPara()
        # the input is a probability matrix and the true labels are a list of classes
        prob_images = np.squeeze(prob_images)
        loss = Error.ErrorProc(prob_images, right_label, ErrorPara)
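        # loss is the error value for this batch; it drives the kernel update below
        # (SetAjConvPara wraps it into a small loss matrix via np.array([[loss]])).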
        AjConv = AjConv_Class("AjConv1", 8, "Convolution Adjustment 1", [], 250, 70)
        AjConvPara = AjConv.SetAjConvPara(loss, ConvPara)
        ConvPara['kernel'] = AjConv.AjConvProc(images, AjConvPara)
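        # The updated kernel is written back into ConvPara, so the next batch iteration
        # convolves with the adjusted kernel. Note that AjConvProc receives the raw
        # batch images here, not the convolved feature maps.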