import numpy as np
import lqmtest_x3_5_pool
import lqmtest_x3_2_load_data
import lqmtest_x3_4_conv_proc
import lqmtest_x3_6_fullconn
import lqmtest_x3_7_nonlinear
import lqmtest_x3_8_classify
import lqmtest_x3_9_label
import lqmtest_x3_10_error
import lqmtest_x3_11_ajconv
class ModelObj:  # Network object
    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        self.ObjID = ObjID  # Element ID
        self.ObjType = ObjType  # Element type
        self.ObjLable = ObjLable  # Object label
        self.ParaString = ParaString  # Parameter string
        self.ObjX = ObjX  # Object position, x coordinate
        self.ObjY = ObjY  # Object position, y coordinate
class AjFullconn_Class(ModelObj):  # Fully connected adjustment object
    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.AjFullconnProc = self.ajfullconn_proc  # Basic operation function
        self.SetAjFCPara = self.setajfc_para  # Parameter setting function
    def ajfullconn_proc(self, AjFCPara):
        # Build the weight update term: the loss scaled by the learning rate
        # (a simplified stand-in for the full chain-rule gradient)
        gradient_weights = np.outer(AjFCPara['loss'],
                                    AjFCPara['learning_rate'])
        # Update the weight matrix and bias vector by gradient descent
        weight_matrix = AjFCPara['weights'] - gradient_weights
        bias_vector = AjFCPara['bias'] - AjFCPara[
            'learning_rate'] * AjFCPara['bias']
        return weight_matrix, bias_vector  # Return the updated weight matrix and bias vector
    def setajfc_para(self, loss, FullConnPara):
        weights = FullConnPara["weights"]
        bias = FullConnPara["bias"]
        loss = np.array([loss])
        AjFCPara = {  # Fully connected weights and bias, learning rate, loss value
            'weights': weights, 'bias': bias,
            'learning_rate': 0.01, 'loss': loss}
        return AjFCPara
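
# Minimal standalone sketch of driving AjFullconn_Class by itself; the zero-filled
# 10x784 weights, length-10 bias, and scalar loss of 0.5 below are illustrative
# assumptions, not values produced by the modules imported above.
#   AjFullconn = AjFullconn_Class("AjFullconn1", 9, "Fully connected adjustment 1", [], 0, 0)
#   FullConnPara = {"weights": np.zeros((10, 784)), "bias": np.zeros(10)}
#   AjFCPara = AjFullconn.SetAjFCPara(0.5, FullConnPara)
#   weight, bias = AjFullconn.AjFullconnProc(AjFCPara)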
if __name__ == '__main__':
    DataSet = lqmtest_x3_2_load_data.Data_Class("DataSet1", 1, "Dataset 1", [], 120, 330)
    # setload_data(): obtain the parameters for loading the dataset
    DataPara = DataSet.SetDataPara()
    train_images, test_images = DataSet.LoadData(DataPara)
    Conv = lqmtest_x3_4_conv_proc.Conv_Class("Conv1", 2, "Convolution 1", [], 250, 330)
    ConvPara = Conv.SetConvPara()
    for i in range(len(train_images) // 32):  # Process the training set in batches of 32
        images = train_images[i * 32:(i + 1) * 32]
        conv_images = []  # List of images after convolution
        for image in images:  # Iterate over the images of this training batch
            dim = len(image.shape)  # Number of dimensions of the image matrix
            if dim == 2:  # A 2-D matrix is first expanded to 3-D
                image_h, image_w = image.shape
                image = np.reshape(image, (1, image_h, image_w))
                # Call ConvProc() to run the convolution defined by ConvPara
                output = Conv.ConvProc(image, ConvPara)
                conv_images.append(output)  # Store the convolution result
            elif dim == 3:  # A 3-D matrix is convolved as-is
                output = Conv.ConvProc(image, ConvPara)
                conv_images.append(output)
        # Convert the list of convolved images to an array for later processing
        conv_images = np.array(conv_images)
        Pool = lqmtest_x3_5_pool.Pool_Class("Pool1", 3, "Max pooling 1", [], 380, 330)
        PoolPara = Pool.SetPollPara()
        pool_images = []  # List of images after pooling
        for image in conv_images:  # Iterate over the convolved images
            output = Pool.MaxPoolProc(image, PoolPara)
            pool_images.append(output)  # Store the pooling result
        # Convert the list of pooled images to an array for later processing
        pool_images = np.array(pool_images)
        _, _, poolH, poolW = pool_images.shape
        FullConn = lqmtest_x3_6_fullconn.FullConn_Class("FullConn1", 4, "Fully connected 1", [], 510, 330)
        FullConnPara = FullConn.SetFullConnPara(poolH, poolW)
        fullconn_images = []  # List of images after the fully connected layer
        for image in pool_images:  # Iterate over the pooled images
            output = FullConn.FullConnProc(image, FullConnPara)
            fullconn_images.append(output)  # Store the fully connected result
        # Convert the list of fully connected outputs to an array for later processing
        fullconn_images = np.array(fullconn_images)
        Nonline = lqmtest_x3_7_nonlinear.Nonline_Class("Nonline1", 5, "Nonlinear function 1", [], 640, 330)
        NonLPara = Nonline.SetNonLPara()
        nonlinear_images = []  # List of images after the nonlinear function
        for image in fullconn_images:  # Iterate over the fully connected outputs
            output = Nonline.NonlinearProc(image, NonLPara)
            nonlinear_images.append(output)  # Store the nonlinear result
        # Convert the list of nonlinear outputs to an array for later processing
        nonlinear_images = np.array(nonlinear_images)
        Classifier = lqmtest_x3_8_classify.Classifier_Class("Classifier1", 6, "Classification 1", [], 780, 330)
        ClassifyPara = Classifier.SetClassifyPara()
        classifier_images = []  # List of classification results
        prob_images = []  # List of probability vectors from the classifier
        for image in nonlinear_images:  # Iterate over the nonlinear outputs
            # Call softmax() to obtain the probability distribution vector
            prob = Classifier.softmax(image)
            prob_images.append(prob)  # Store the probability vector
            output = Classifier.ClassifierProc(image, ClassifyPara)
            classifier_images.append(output)  # Store the classification result
        # Convert the list of classification results to an array for later processing
        classifier_images = np.array(classifier_images)
        LabelPara = lqmtest_x3_9_label.Label()
        label_dict = LabelPara.setlabel_para()
        right_label = LabelPara.label_array(i)
        labeled_samples = LabelPara.label_proc(images,
                                               right_label, label_dict)
        Error = lqmtest_x3_10_error.Error_Class("Error1", 7, "Error calculation 1", [], 710, 124)
        ErrorPara = Error.SetErrorPara()
        # The input is a probability matrix; the true labels form a class list
        prob_images = np.squeeze(prob_images)
        loss = Error.ErrorProc(prob_images, right_label, ErrorPara)
        AjConv = lqmtest_x3_11_ajconv.AjConv_Class("AjConv1", 8, "Convolution adjustment 1", [], 250, 70)
        AjConvPara = AjConv.SetAjConvPara(loss, ConvPara)
        ConvPara['kernel'] = AjConv.AjConvProc(images, AjConvPara)
        AjFullconn = AjFullconn_Class("AjFullconn1", 9, "Fully connected adjustment 1", [], 510, 120)
        AjFCPara = AjFullconn.SetAjFCPara(loss, FullConnPara)
        weight, bias = AjFullconn.AjFullconnProc(AjFCPara)
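        # Note: this test script does not write the result back; to keep training,
        # the updated values could be fed back into FullConnPara (for example,
        # FullConnPara['weights'] = weight and FullConnPara['bias'] = bias),
        # mirroring the kernel update applied to ConvPara above.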