Compare commits

..

No commits in common. 'master' and 'main' have entirely different histories.
master ... main

21
.gitignore vendored

@ -0,0 +1,21 @@
# ---> Vim
# Swap
[._]*.s[a-v][a-z]
# Comment out the following line if you don't need to keep vector (.svg) files
!*.svg
[._]*.sw[a-p]
[._]s[a-rt-v][a-z]
[._]ss[a-gi-z]
[._]sw[a-p]
# Session
Session.vim
Sessionx.vim
# Temporary
.netrwhist
*~
# Auto-generated tag files
tags
# Persistent undo
[._]*.un~

3
.idea/.gitignore vendored

@ -1,3 +0,0 @@
# Default ignored files
/shelf/
/workspace.xml

@ -1 +0,0 @@
X3_new.py

@ -1,14 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="PublishConfigData">
<serverData>
<paths name="root@222.187.226.110:35261">
<serverdata>
<mappings>
<mapping local="$PROJECT_DIR$" web="/" />
</mappings>
</serverdata>
</paths>
</serverData>
</component>
</project>

@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Encoding">
<file url="file://$PROJECT_DIR$/data/data.csv" charset="GBK" />
<file url="file://$PROJECT_DIR$/data/test.csv" charset="GBK" />
<file url="file://$PROJECT_DIR$/data/train.csv" charset="GBK" />
</component>
</project>

@ -1,14 +0,0 @@
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredPackages">
<value>
<list size="1">
<item index="0" class="java.lang.String" itemvalue="thop" />
</list>
</value>
</option>
</inspection_tool>
</profile>
</component>

@ -1,6 +0,0 @@
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Black">
<option name="sdkName" value="Python 3.8" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8" project-jdk-type="Python SDK" />
</project>

@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/神经网络可视化.iml" filepath="$PROJECT_DIR$/.idea/神经网络可视化.iml" />
</modules>
</component>
</project>

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$/data" vcs="Git" />
</component>
</project>

@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="Python 3.8" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.3 KiB

@ -1,8 +0,0 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

@ -1,27 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="FacetManager">
<facet type="django" name="Django">
<configuration>
<option name="rootFolder" value="$MODULE_DIR$" />
<option name="settingsModule" value="HelloWorld/settings.py" />
<option name="manageScript" value="$MODULE_DIR$/manage.py" />
<option name="environment" value="&lt;map/&gt;" />
<option name="doNotUseTestRunner" value="false" />
<option name="trackFilePattern" value="migrations" />
</configuration>
</facet>
</component>
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="PLAIN" />
<option name="myDocStringFormat" value="Plain" />
</component>
<component name="TemplatesService">
<option name="TEMPLATE_CONFIGURATION" value="Django" />
</component>
</module>

@ -1,14 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="PublishConfigData" remoteFilesAllowedToDisappearOnAutoupload="false">
<serverData>
<paths name="root@222.187.226.110:35261">
<serverdata>
<mappings>
<mapping local="$PROJECT_DIR$" web="/" />
</mappings>
</serverdata>
</paths>
</serverData>
</component>
</project>

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Encoding" addBOMForNewFiles="with NO BOM" />
</project>

@ -1,7 +0,0 @@
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="Eslint" enabled="true" level="WARNING" enabled_by_default="true" />
<inspection_tool class="TsLint" enabled="true" level="WARNING" enabled_by_default="true" />
</profile>
</component>

@ -1,6 +0,0 @@
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8" project-jdk-type="Python SDK" />
</project>

@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/HelloWorld.iml" filepath="$PROJECT_DIR$/.idea/HelloWorld.iml" />
</modules>
</component>
</project>

@ -1,16 +0,0 @@
"""
ASGI config for HelloWorld project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HelloWorld.settings')
application = get_asgi_application()

@ -1,129 +0,0 @@
"""
Django settings for HelloWorld project.
Generated by 'django-admin startproject' using Django 3.2.13.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-a8_of0cvho)d!rmhlzadvv$bv#1m%7w!^bn!ir%2$(cag!4!o&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'HelloWorld.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'HelloWorld.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# NOTE(review): MySQL credentials are hard-coded here; move them to
# environment variables before sharing or deploying this project.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': "cheshi",
'USER':"root",
'PASSWORD':'123123',
'HOST':'127.0.0.1',
'PORT':'3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

@ -1,21 +0,0 @@
"""HelloWorld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]

@ -1,16 +0,0 @@
"""
WSGI config for HelloWorld project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HelloWorld.settings')
application = get_wsgi_application()

@ -1,64 +0,0 @@
# -*- coding: utf-8 -*-
# Time    : 2023/12/8 9:06
# Author  : lirunsheng
# User    : l'r's
# Software: PyCharm
# File    : demo1.py
# Minimal tkinter demo: three stacked "pages" with prev/next buttons.
import tkinter as tk


def show_page(page_num):
    """Hide every page, then show only the page at index ``page_num``."""
    for frame in pages:
        frame.pack_forget()
    pages[page_num].pack()


def next_page():
    """Advance to the following page, if there is one."""
    global current_page
    if current_page < len(pages) - 1:
        current_page += 1
        show_page(current_page)


def prev_page():
    """Go back to the previous page, if there is one."""
    global current_page
    if current_page > 0:
        current_page -= 1
        show_page(current_page)


# Main window.
root = tk.Tk()

# Build the three pages, each a Frame holding a single label.
page1 = tk.Frame(root)
page1.pack()
tk.Label(page1, text="第一页").pack()

page2 = tk.Frame(root)
page2.pack()
tk.Label(page2, text="第二页").pack()

page3 = tk.Frame(root)
page3.pack()
tk.Label(page3, text="第三页").pack()

pages = [page1, page2, page3]
current_page = 0

# Paging buttons on either side of the window.
prev_btn = tk.Button(root, text="上一页", command=prev_page)
prev_btn.pack(side=tk.LEFT)
next_btn = tk.Button(root, text="下一页", command=next_page)
next_btn.pack(side=tk.RIGHT)

# Show the first page and enter the event loop.
show_page(current_page)
root.mainloop()

@ -1,22 +0,0 @@
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    """Run administrative tasks via Django's command-line entry point."""
    # Make sure the project settings are found before Django is imported.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HelloWorld.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()

@ -1,3 +0,0 @@
from django.contrib import admin
# Register your models here.

@ -1,6 +0,0 @@
from django.apps import AppConfig


class StudentConfig(AppConfig):
    """App configuration for the ``student`` Django app."""

    # Use 64-bit auto-incrementing primary keys by default.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'student'

@ -1,3 +0,0 @@
from django.test import TestCase
# Create your tests here.

@ -1,3 +0,0 @@
from django.shortcuts import render
# Create your views here.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 500 KiB

@ -1,413 +0,0 @@
# -*- encoding: utf-8 -*-
'''
@File : X1_new.py
@License : (C)Copyright 2018-2022
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2023/6/30 15:51 zart20 1.0 None
'''
'''
编程16.1编制程序对卷积神经网络模型的数据结构赋值
目的及编程说明读者通过编程16.1可理解可视化构建卷积神经网络模型相关的数据结构及其作用这里给出的数据结构
能够支持16.2及16.4节的输出界面即能够绘制出一张卷积神经网络结构图编程16.1要求为模型对象总表AllModelObj
和AllModelConn赋值要求包含所有类型的图元对象见下表述并建立适当的连接关系
网络对象总表AllModelObj(ObjIDObjTypeObjLableProcFunc, SetParaFunc, ParaString, ObjXObjY)
其中ObjID 表示图元号ObjType表示图元类别1数据集2卷积3池化4全连接5非线性6分类7误差计算
8卷积调整9全连接调整ObjLable为对象标签ProcFunc为该图元被执行时的基本操作函数
SetParaFunc为给基本操作函数赋参数值的函数ParaString 为同类不同图元的参数ObjX,
ObjY为对象在仿真输出屏幕中的位置示例中未给出由图形界面中的位置产生
'''
class ModelObj:
    """Base record for one network-diagram element (graph primitive)."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        self.ObjID = ObjID            # primitive id
        self.ObjType = ObjType        # primitive category (1..9)
        self.ObjLable = ObjLable      # display label
        self.ParaString = ParaString  # parameter string
        self.ObjX = ObjX              # x position on the output canvas
        self.ObjY = ObjY              # y position on the output canvas
class Data_Class(ModelObj):
    """Dataset element: loads the input data for the network."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.LoadData = self.load_data          # processing function
        self.SetDataPara = self.set_data_para   # parameter-setting function

    def load_data(self, DataPara):
        pass  # implemented in X3

    def set_data_para(self):
        pass  # implemented in X3

    def output(self):
        """Flatten this element into the AllModelObj row layout."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.LoadData,
                self.SetDataPara, self.ParaString, self.ObjX, self.ObjY]
# if __name__ == '__main__':
# DataSet = Data_Class("DataSet1", 1, "数据集1", ".", 120, 330)
# print(DataSet)
class Conv_Class(ModelObj):
    """Convolution element."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.ConvProc = self.conv_proc          # processing function
        self.SetConvPara = self.setconv_para    # parameter-setting function

    def conv_proc(self, image, ConvPara):
        pass  # implemented in X3

    def setconv_para(self):
        pass  # implemented in X3

    def output(self):
        """Flatten this element into the AllModelObj row layout."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.ConvProc,
                self.SetConvPara, self.ParaString, self.ObjX, self.ObjY]


class Pool_Class(ModelObj):
    """Pooling element."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.MaxPoolProc = self.pool_proc       # processing function
        self.SetPollPara = self.setpool_para    # parameter-setting function

    def pool_proc(self, image, PoolPara):
        pass  # implemented in X3

    def setpool_para(self):
        pass  # implemented in X3

    def output(self):
        """Flatten this element into the AllModelObj row layout."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.MaxPoolProc,
                self.SetPollPara, self.ParaString, self.ObjX, self.ObjY]


class FullConn_Class(ModelObj):
    """Fully-connected element."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.FullConnProc = self.fullconn_proc        # processing function
        self.SetFullConnPara = self.setfullconn_para  # parameter-setting function

    def fullconn_proc(self, inputdata, FullConnPara):
        pass  # implemented in X3

    def setfullconn_para(self, data):
        pass  # implemented in X3

    def output(self):
        """Flatten this element into the AllModelObj row layout."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.FullConnProc,
                self.SetFullConnPara, self.ParaString, self.ObjX, self.ObjY]


class Nonline_Class(ModelObj):
    """Non-linearity element."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.NonlinearProc = self.nonlinear_proc  # processing function
        self.SetNonLPara = self.setnonl_para      # parameter-setting function

    def nonlinear_proc(self, inputdata, NonLPara):
        pass  # implemented in X3

    def setnonl_para(self):
        pass  # implemented in X3

    def output(self):
        """Flatten this element into the AllModelObj row layout."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.NonlinearProc,
                self.SetNonLPara, self.ParaString, self.ObjX, self.ObjY]


class Classifier_Class(ModelObj):
    """Classifier element."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.ClassifierProc = self.classifier_proc  # processing function
        self.SetClassifyPara = self.setclassify_para  # parameter-setting function

    def classifier_proc(self, inputdata, ClassifyPara):
        pass  # implemented in X3

    def setclassify_para(self):
        pass  # implemented in X3

    def output(self):
        """Flatten this element into the AllModelObj row layout."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.ClassifierProc,
                self.SetClassifyPara, self.ParaString, self.ObjX, self.ObjY]


class Error_Class(ModelObj):
    """Error-computation element."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.ErrorProc = self.error_proc        # processing function
        self.SetErrorPara = self.seterror_para  # parameter-setting function

    def error_proc(self, input, label, ErrorPara):
        pass  # implemented in X3

    def seterror_para(self):
        pass  # implemented in X3

    def output(self):
        """Flatten this element into the AllModelObj row layout."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.ErrorProc,
                self.SetErrorPara, self.ParaString, self.ObjX, self.ObjY]


class AjConv_Class(ModelObj):
    """Convolution-adjustment element."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.AjConvProc = self.ajconv_proc        # processing function
        self.SetAjConvPara = self.setajconv_para  # parameter-setting function

    def ajconv_proc(self, input, AjConvPara):
        pass  # implemented in X3

    def setajconv_para(self):
        pass  # implemented in X3

    def output(self):
        """Flatten this element into the AllModelObj row layout."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.AjConvProc,
                self.SetAjConvPara, self.ParaString, self.ObjX, self.ObjY]


class AjFullconn_Class(ModelObj):
    """Fully-connected-adjustment element."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.AjFullconnProc = self.ajfullconn_proc  # processing function
        self.SetAjFCPara = self.setajfc_para        # parameter-setting function

    def ajfullconn_proc(self):
        pass  # implemented in X3

    def setajfc_para(self, AjFCPara):
        print(AjFCPara)  # implemented in X3

    def output(self):
        """Flatten this element into the AllModelObj row layout."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.AjFullconnProc,
                self.SetAjFCPara, self.ParaString, self.ObjX, self.ObjY]
# AjFullconn = AjFullconn_Class("AjFullconn", 9,
# "全连接调整1", [], 510, 120)
# AjFullconn.SetAjFCPara('rrr')
DataSet = Data_Class("DataSet1", 1, "数据集1", [], 120, 330).output()
Conv = Conv_Class("Conv1", 2, "卷积1", [], 250, 330).output()
Pool = Pool_Class("Pool1", 3, "最大池化1", [], 380, 330).output()
FullConn = FullConn_Class("FullConn1", 4, "全连接1", [], 510, 330).output()
Nonline = Nonline_Class("Nonline1", 5, "非线性函数1", [], 640, 330).output()
Classifier = Classifier_Class("Classifier1", 6, "分类1", [], 780, 330).output()
Error = Error_Class("Error1", 7, "误差计算1", [], 710, 124).output()
AjConv = AjConv_Class("AjConv1", 8, "卷积调整1", [], 250, 70).output()
AjFullconn = AjFullconn_Class("AjFullconn1", 9,
"全连接调整1", [], 510, 120).output()
AllModelObj = [DataSet, Conv, Pool, FullConn, Nonline, Classifier, Error, AjConv, AjFullconn]
# AjFullconn.SetAjFCPara(7)
# AjFullconn.SetAjFCPara(7)
# print(AllModelObj)
class ModelConn:
    """Connection (edge) between two diagram elements."""

    def __init__(self, ConnObjID, ConnType, NobjS, NobjE):
        self.ConnObjID = ConnObjID  # connection id
        self.ConnType = ConnType    # connection category (1 forward, 2 adjust — per AllModelConn)
        self.NobjS = NobjS          # source element
        self.NobjE = NobjE          # target element

    def output(self):
        """Flatten this connection into the AllModelConn row layout."""
        return [self.ConnObjID, self.ConnType, self.NobjS, self.NobjE]
# if __name__ == '__main__':
# ···AllModelObj方法继承
# # 创建连接对象实例
# Line1 = ModelConn(1, 1, DataSet.ObjID, Conv.ObjID)
# Line2 = ModelConn(2, 1, Conv.ObjID, Pool.ObjID)
# Line3 = ModelConn(3, 1, Pool.ObjID, FullConn.ObjID)
# Line4 = ModelConn(4, 1, FullConn.ObjID, Nonline.ObjID)
# Line5 = ModelConn(5, 1, Nonline.ObjID, Classifier.ObjID)
# Line6 = ModelConn(6, 1, Classifier.ObjID, Error.ObjID)
# Line7 = ModelConn(7, 2, Error.ObjID, AjFullconn.ObjID)
# Line8 = ModelConn(8, 2, Error.ObjID, AjConv.ObjID)
# Line9 = ModelConn(9, 2, AjFullconn.ObjID, FullConn.ObjID)
# Line10 = ModelConn(10, 2, AjConv.ObjID, Conv.ObjID)
#
# # 网络连接对象总表
# AllModelConn = [[Line1], [Line2], [Line3], [Line4],
# [Line5], [Line6], [Line7], [Line8],
# [Line9], [Line10]]
# print(AllModelConn)
# Assignment for exercise X16.1: hand-written element and connection tables.
# AllModelObj row layout: [id, type, label, proc-func name,
# set-para-func name, parameter string, x, y].
AllModelObj = [
['DataSet1', 1, '数据集1', 'LoadData',
'SetDataPara', [], 120, 330],
['Conv1', 2, '卷积1', 'ConvProc',
'SetConvPara', [], 250, 330],
['Pool1', 3, '最大池化1', 'MaxPoolProc',
'SetPollPara', [], 380, 330],
['FullConn1',4,'全连接1','FullConnProc',
'SetFullConnPara', [], 510,330],
['Nonline1',5,'非线性函数1', 'NonlinearProc',
'SetNonLPara', [], 640, 330],
['Classifier1',6,'分类1','ClassifierProc',
'SetClassifyPara',[],780,330],
['Error1', 7, '误差计算1', 'ErrorProc',
'SetErrorPara', [], 710, 124],
['AjConv1', 8, '卷积调整1', 'AjConvProc',
'SetAjConvPara', [], 250, 70],
['AjFullconn1',9,'全连接调整1','AjFullconnProc',
'SetAjFCPara',[],510,120]]
# AllModelConn row layout: [conn id, conn type (1 forward / 2 adjust),
# source element id, target element id].
AllModelConn = [
[1, 1, 'DataSet1', 'Conv1'], [2, 1, 'Conv1', 'Pool1'],
[3, 1, 'Pool1', 'FullConn1'], [4, 1, 'FullConn1', 'Nonline1'],
[5, 1, 'Nonline1', 'Classifier1'], [6, 1, 'Classifier1', 'Error1'],
[7, 2, 'Error1', 'AjFullconn1'], [8, 2, 'Error1', 'AjConv1'],
[9, 2, 'AjFullconn1', 'FullConn1'], [10, 2, 'AjConv1', 'Conv1']]
def create_almodelobj():
    """Programmatically build the AllModelObj table.

    Returns a list of 9 rows, one per element type:
    [id, type, label, proc-func name, set-para-func name,
    parameter string, x, y].
    """
    obj_label = ["数据集", "卷积", "池化", "全连接", "非线性",
                 "分类", "误差计算", "卷积调整", "全连接调整"]
    proc_func = ['LoadData', 'ConvProc', 'MaxPoolProc', 'FullConnProc',
                 'NonlinearProc', 'ClassifierProc', 'ErrorProc',
                 'AjConvProc', 'AjFullconnProc']
    set_para_func = ['SetDataPara', 'SetConvPara', 'SetPollPara',
                     'SetFullConnPara', 'SetNonLPara', 'SetClassifyPara',
                     'SetErrorPara', 'SetAjConvPara', 'SetAjFCPara']
    all_model = []
    for i in range(9):
        suffix = f"{i // 9 + 1}"  # instance counter (always "1" for 9 rows)
        all_model.append([
            i,                              # object id
            i % 9 + 1,                      # object type (1..9)
            obj_label[i % 9] + suffix,      # display label
            proc_func[i % 9] + suffix,      # processing-function name
            set_para_func[i % 9] + suffix,  # parameter-setting-function name
            '.',                            # parameter-string placeholder
            i * 20,                         # x position
            i * 20,                         # y position
        ])
    return all_model
# if __name__ == '__main__':
# all_model = create_almodelobj()
# print(all_model)
def create_allmodelconn():
    """Programmatically build the AllModelConn table.

    Returns a list of [conn-id, conn-type, source-id, target-id] rows;
    type-1 edges form the forward chain, type-2 edges are the
    error-feedback/adjustment links.
    """
    conn_obj_id = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]   # connection ids
    conn_type = [1, 1, 1, 1, 1, 1, 2, 2, 2, 2]      # connection categories
    # Source element of every edge.
    nobj_s = ["DataSet1", "Conv1", "Pool1", "FullConn1", "Nonline1",
              "Classifier1", "Error1", "Error1", "AjFullconn1", "AjConv1"]
    # Target element of every edge.
    nobj_e = ["Conv1", "Pool1", "FullConn1", "Nonline1", "Classifier1",
              "Error1", "AjFullconn1", "AjConv1", "FullConn1", "Conv1"]
    # Zip the four parallel lists into one row per connection.
    return [list(row) for row in zip(conn_obj_id, conn_type, nobj_s, nobj_e)]
# print(create_allmodelconn())
# if __name__ == '__main__':
# all_conn = create_allmodelconn()
# print(all_conn)
# 导入需要的库
import numpy as np # 用于处理数组
from PIL import Image # 用于读取图片
'''
编程16.2编制程序练习加载输入数据
目的及编程说明读者通过编程16.2可理解加载数据的过程本项目中假定输入数据的类型为图片数据
要求读者能够实现将程序指定的图片转换为卷积神经网络模型能够接受的数据格式即数组具体实现要求
1程序读取一个固定路径的图片,
2将图片缩放为 32x32固定大小并将其转换为数组格式
3将数组中具体数值归一化该程序为编制LoadData()奠定基础
'''
# Convert an image file into a normalized grayscale array.
def image_to_array(path, height, width):
    """Load an image as a normalized grayscale array.

    Parameters
    ----------
    path : str
        Path of the image file to read.
    height, width : int
        Target size in pixels.

    Returns
    -------
    numpy.ndarray of shape (height, width), values scaled into [0, 1].
    """
    img = Image.open(path).convert("L")  # "L" = 8-bit grayscale
    # BUG FIX: PIL's resize() expects (width, height); the original passed
    # (height, width), which transposed the result for non-square sizes.
    img = img.resize((width, height))
    data = np.array(img)     # shape (height, width)
    return data / 255.0      # normalize 8-bit pixel values into [0, 1]
# if __name__ == '__main__':
# path = 'data/train/狗/1.jpg' # 路径
# height, width =32, 32 # 尺寸hw
# data_image = image_to_array(path,height,width)
# print(data_image)
'''
编程16.3编制程序完成指定卷积核参数的卷积操作
目的及编程说明读者通过编程16.3可理解卷积操作的具体计算过程该函数的输入数据为数组格式的数据具体实现要求
1程序中给出卷积核大小为3*3的矩阵并给矩阵赋初始值例如初值[[ 1, -1, 2], [ 3, -2, 1], [ 0, 1, 0]]
[[ 0, 1, 0], [ 1, -4, 1], [ 0, 1, 0]]或自己设定
2默认步长stride为13完成卷积计算
4返回卷积后的数组该程序为编制ConvProc()奠定基础
'''
# 导入需要的库
import numpy as np # 用于处理数组
# 2-D valid "convolution" (cross-correlation, as used in CNNs).
def convolve(data, kernel, stride=1):
    """Slide ``kernel`` over ``data`` and sum elementwise products.

    Parameters
    ----------
    data : 2-D numpy array
        Input image / feature map of shape (H, W).
    kernel : 2-D numpy array
        Filter weights of shape (kH, kW).
    stride : int, optional
        Step between window positions. Defaults to 1, which reproduces
        the original behavior exactly.

    Returns
    -------
    2-D numpy array of shape
    ((H - kH) // stride + 1, (W - kW) // stride + 1).
    """
    data_height, data_width = data.shape
    kernel_height, kernel_width = kernel.shape
    # "Valid" output size for the given stride (no padding).
    output_height = (data_height - kernel_height) // stride + 1
    output_width = (data_width - kernel_width) // stride + 1
    output = np.zeros((output_height, output_width))
    for i in range(output_height):
        for j in range(output_width):
            r, c = i * stride, j * stride
            # Window of the input under the kernel at this position.
            region = data[r:r + kernel_height, c:c + kernel_width]
            output[i, j] = np.sum(region * kernel)
    return output
# if __name__ == '__main__':
# path = 'data/train/狗/1.jpg' # 路径
# height, width = 28, 28 # 尺寸hw
# data_image = image_to_array(path, height, width)
#
# # 定义一个卷积核,可以自己修改
# kernel = np.array([[1, -1, 2],
# [3, -2, 1],
# [0, 1, 0]])
# output = convolve(data_image, kernel) # 传入数据和卷积核
# print(output)
'''
编程16.4编制程序完成指定参数的全连接操作
目的及编程说明读者通过编程16.4可理解全连接操作的具体计算过程
该函数的输入数据为数组格式的数据输出为计算后的数组具体实现要求
1程序中给定权重矩阵[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]和偏置向量[0.1, 0.2]
可自己自主设定参数
2计算输入数据和权重矩阵的乘积注意矩阵的维度并加上偏置向量
3返回计算后的数组该程序为编制FullConnProc()奠定基础
'''
def full_connect(input_data, weight_matrix, bias_vector):
    """Fully-connected (dense) layer: ``input @ weights + bias``.

    The bias vector is reshaped to (1, 1, len(bias)) so that it
    broadcasts over the two leading axes of a 3-D input batch.
    """
    product = np.dot(input_data, weight_matrix)      # matrix product
    bias = bias_vector[np.newaxis, np.newaxis, :]    # shape (1, 1, n)
    return product + bias                            # broadcast add
if __name__ == '__main__':
    # Demo: push one 3-D batch through the dense layer and print it.
    weight_matrix = np.array([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]])  # given weights
    bias_vector = np.array([0.1, 0.2, 0.3])                       # given bias
    # 3-D input: axis 0 = sample, axis 1 = feature row, axis 2 = channel.
    input_data = np.array([[[1, 2], [3, 4], [5, 6]],
                           [[7, 8], [9, 10], [11, 12]],
                           [[13, 14], [15, 16], [17, 18]]])
    output_data = full_connect(input_data, weight_matrix, bias_vector)
    print(output_data)

@ -1,224 +0,0 @@
import tkinter as tk
from PIL import Image, ImageTk
global Viewcanvas # 定义画布
global Root # 主窗口
global AllModelObj #网络对象
'''
编程16.5编制程序依据AllModelObj和AllModelConn数据结构产生如图16.2的输出界面
目的及编程说明读者通过编程16.5可理解卷积神经网络模型构建的输出界面数据结构及初始值参见编程16.1
'''
class ModelObj:
    """Diagram element record, including its function names."""

    def __init__(self, ObjID, ObjType, ObjLable, ProcFunc, SetParaFunc, ParaString, ObjX, ObjY):
        self.ObjID = ObjID              # primitive id
        self.ObjType = ObjType          # primitive category (1..9)
        self.ObjLable = ObjLable        # display label
        self.ProcFunc = ProcFunc        # processing-function name
        self.SetParaFunc = SetParaFunc  # parameter-setting-function name
        self.ParaString = ParaString    # parameter string
        self.ObjX = ObjX                # canvas x coordinate
        self.ObjY = ObjY                # canvas y coordinate

    def output(self):
        """Return the attributes as a flat list (AllModelObj row layout)."""
        return [self.ObjID, self.ObjType, self.ObjLable, self.ProcFunc,
                self.SetParaFunc, self.ParaString, self.ObjX, self.ObjY]
class ModelConn:
    """Connection (edge) between two diagram elements."""

    def __init__(self, ConnObjID, ConnType, NobjS, NobjE):
        self.ConnObjID = ConnObjID  # connection id
        self.ConnType = ConnType    # connection category
        self.NobjS = NobjS          # source element
        self.NobjE = NobjE          # target element

    def __repr__(self):
        # Same comma-separated form the original emitted.
        return f"{self.ConnObjID}, {self.ConnType}, {self.NobjS}, {self.NobjE}"
def create_instance():
    """Build the nine element rows; each row is ``ModelObj(...).output()``."""
    specs = [
        ("DataSet", 1, "数据集", "LoadData", "SetDataPara", ".", 120, 330),
        ("Conv", 2, "卷积", "ConvProc", "SetConvPara", ".", 250, 330),
        ("Pool", 3, "最大池化", "MaxPoolProc", "SetPollPara", ".", 380, 330),
        ("FullConn", 4, "全连接", "FullConnProc", "SetFullConnPara", ".", 510, 330),
        ("Nonline", 5, "非线性函数", "NonlinearProc", "SetNonLPara", ".", 640, 330),
        ("Classifier", 6, "分类", "ClassifierProc", "SetClassifyPara", ".", 780, 330),
        ("Error", 7, "误差计算", "ErrorProc", "SetErrorPara", ".", 710, 124),
        ("AjConv", 8, "卷积调整", "AjConvProc", "SetAjConvPara", ".", 250, 70),
        ("AjFullconn", 9, "全连接调整", "AjFullconnProc", "SetAjFCPara", ".", 510, 120),
    ]
    # Return the flattened rows, in the same order as the specs above.
    return [ModelObj(*spec).output() for spec in specs]
# if __name__ == '__main__':
# listinstance = create_instance()
# for instance in listinstance:
# print(instance)
def connect_class(listinstance):
    """Create the ten ModelConn edges between element rows.

    Indices into ``listinstance`` follow create_instance() ordering:
    0 DataSet, 1 Conv, 2 Pool, 3 FullConn, 4 Nonline,
    5 Classifier, 6 Error, 7 AjConv, 8 AjFullconn.
    """
    edges = [
        (1, 1, 0, 1), (2, 1, 1, 2), (3, 1, 2, 3), (4, 1, 3, 4),
        (5, 1, 4, 5), (6, 1, 5, 6), (7, 2, 6, 8), (8, 2, 6, 7),
        (9, 2, 8, 3), (10, 2, 7, 1),
    ]
    return [ModelConn(cid, ctype, listinstance[s], listinstance[e])
            for cid, ctype, s, e in edges]
# if __name__ == '__main__':
# listinstance = create_instance()
# listclass = connect_class(listinstance)
# for iclass in listclass:
# print(iclass)
def element(path):
    """Load one icon image, resize it to 60x50 and return a PhotoImage.

    A reference is stored on Root.img so the image is not garbage
    collected while Tk still displays it.
    NOTE(review): every call overwrites Root.img, so only the most
    recent icon is protected here; callers keep their own list of
    returned images, which covers the rest — confirm.
    """
    icon = Image.open(path)            # load the icon file
    icon = icon.resize((60, 50))       # fixed icon size on the canvas
    photo = ImageTk.PhotoImage(icon)   # convert for use on a Tk canvas
    Root.img = photo                   # keep a live reference
    return photo
# if __name__ == '__main__':
# Root = tk.Tk() # 创建一个主窗口
# img_path = ["img/data.png", "img/conv.png", "img/pool.png", "img/full_connect.png", "img/nonlinear.png",
# "img/classifier.png", "img/error.png", "img/adjust.png"] # 图元路径
# list_image = [] # 定义一个列表存储PhotoImage对象
# for path in img_path:
# list_image.append(element(path))
# for image in list_image:
# print(image) # 打印结果
def window():
    """Create the 900x550 main window and its white drawing canvas."""
    global Root
    global Viewcanvas
    Root = tk.Tk()
    window_width = 900    # canvas width in pixels
    window_height = 550   # canvas height in pixels
    Root.title("神经网络可视化")
    Root.geometry("900x550")  # window size
    # White canvas that the whole diagram is drawn on.
    Viewcanvas = tk.Canvas(Root, width=window_width, height=window_height, bg="white")
    Viewcanvas.pack()
    # Light-blue border just inside the window edge.
    Viewcanvas.create_rectangle(5, 5, 895, 545, fill=None, outline="lightblue", width=2)
# if __name__ == '__main__':
# window()
# path = "img/data.png" # 图元路径
# image =element(path)
# print(image) # 打印结果
# Root.mainloop()
def connecting_lines(obj_x, obj_y, obj_x1, obj_x2, obj_x3, obj_y1, obj_y2, obj_y3, image, text, smooth, width):
    """Draw one element icon, its label, and a single arrowed line."""
    Viewcanvas.create_image(obj_x, obj_y, image=image)                  # icon
    Viewcanvas.create_text(obj_x1, obj_y1, text=text, font=("黑体", 14))  # label
    Viewcanvas.create_line(obj_x2, obj_y2, obj_x3, obj_y3, arrow=tk.LAST,
                           arrowshape=(16, 20, 4), fill='lightblue',
                           smooth=smooth, width=width)


def connectings_lines(obj_x, obj_y, obj_x1, obj_x2, obj_x3, obj_x4, obj_y1, obj_y2, obj_y3, obj_y4, image, text, smooth, width):
    """Draw one element icon, its label, and a two-segment arrowed line."""
    Viewcanvas.create_image(obj_x, obj_y, image=image)                  # icon
    Viewcanvas.create_text(obj_x1, obj_y1, text=text, font=("黑体", 14))  # label
    Viewcanvas.create_line(obj_x2, obj_y2, obj_x3, obj_y3, obj_x4, obj_y4,
                           arrow=tk.LAST, arrowshape=(16, 20, 4),
                           fill='lightblue', smooth=smooth, width=width)
# if __name__ == '__main__':
# window()
# listinstance = create_instance()
# # 创建网络对象总表和网络连接对象总表
# AllModelObj = [listinstance[0], listinstance[1], listinstance[2], listinstance[3], listinstance[4], listinstance[5],
# listinstance[6], listinstance[7], listinstance[8]]
# img_path = ["img/data.png", "img/conv.png", "img/pool.png", "img/full_connect.png", "img/nonlinear.png",
# "img/classifier.png", "img/error.png", "img/adjust.png"]
# list_image = []
# for path in img_path:
# list_image.append(element(path))
# obj_x = AllModelObj[0][6] # 根据对象的id计算x坐标
# obj_y = AllModelObj[0][7] # 根据对象的id计算y坐标
# obj_x2 = AllModelObj[5][6] # 根据对象的id计算x坐标
# obj_y2 = AllModelObj[5][7] # 根据对象的id计算y坐标
# connecting_lines(obj_x, obj_y, 0, 32, 100, 50, 0, 0, list_image[0], " 加载" + "\n" + "数据集", True, 3)
# connectings_lines(obj_x2, obj_y2, 0, 0, 0, -50, 50, -30, -120, -180, list_image[5], "类别", False, 3)
# Root.mainloop()
def switch(obj_type, obj_x, obj_y, listimage):
    """Render one network element (and its outgoing arrows) selected by obj_type."""
    # Types 1-5 share one shape: icon, caption below, straight smooth arrow to
    # the right.  obj_type -> (image index, caption, arrow start dx, arrow end dx)
    simple = {
        1: (0, " 加载" + "\n" + "数据集", 32, 100),
        2: (1, "卷积", 30, 100),
        3: (2, "池化", 30, 100),
        4: (3, "全连接" + "\n" + " 函数", 30, 100),
        5: (4, "非线性" + "\n" + " 函数", 30, 110),
    }
    if obj_type in simple:
        idx, caption, x_from, x_to = simple[obj_type]
        connecting_lines(obj_x, obj_y, obj_x + 0, obj_x + x_from, obj_x + x_to,
                         obj_y + 50, obj_y + 0, obj_y + 0, listimage[idx], caption, True, 3)
    elif obj_type == 6:  # classifier: polyline running up and to the left
        connectings_lines(obj_x, obj_y, obj_x + 0, obj_x + 0, obj_x + 0, obj_x - 50,
                          obj_y + 50, obj_y - 30, obj_y - 120, obj_y - 180,
                          listimage[5], "类别", False, 3)
    elif obj_type == 7:  # error node: two arrows back towards the adjusters
        connectings_lines(obj_x, obj_y, obj_x + 0, obj_x - 20, obj_x - 50, obj_x - 420,
                          obj_y - 40, obj_y - 20, obj_y - 60, obj_y - 60,
                          listimage[6], "误差", False, 2)
        connecting_lines(obj_x, obj_y, obj_x + 0, obj_x - 40, obj_x - 170,
                         obj_y - 40, obj_y + 0, obj_y + 0, listimage[6], "误差", False, 2)
    elif obj_type == 8:  # convolution adjuster: long arrow dropping down
        connecting_lines(obj_x, obj_y, obj_x - 80, obj_x + 0, obj_x + 0,
                         obj_y + 0, obj_y + 30, obj_y + 235, listimage[7], "调整1", False, 2)
    elif obj_type == 9:  # fully-connected adjuster: shorter drop
        connecting_lines(obj_x, obj_y, obj_x - 80, obj_x + 0, obj_x + 0,
                         obj_y + 0, obj_y + 30, obj_y + 183, listimage[7], "调整2", False, 2)
def creating_elements(AllModelObj, listimage):
    """Walk the object table and draw every element on the canvas.

    Each entry is indexed: obj[1] is the element type, obj[6]/obj[7] its
    (x, y) anchor on the canvas.
    """
    for obj in AllModelObj:
        switch(obj[1], obj[6], obj[7], listimage)
def main():
    """Build the model tables, load the icons, draw everything, start Tk."""
    global AllModelObj
    window()
    listinstance = create_instance()
    listclass = connect_class(listinstance)
    # Master tables: the nine network objects and the ten connection records
    AllModelObj = list(listinstance[:9])
    AllModelConn = list(listclass[:10])
    # Icon files, one per element type (indexed by type - 1)
    img_path = ["img/data.png", "img/conv.png", "img/pool.png", "img/full_connect.png", "img/nonlinear.png",
                "img/classifier.png", "img/error.png", "img/adjust.png"]
    list_image = [element(path) for path in img_path]
    creating_elements(AllModelObj, list_image)
    print(1)
    Root.mainloop()
if __name__ == '__main__':  # entry point: only start the GUI when run directly
    main()

@ -1,136 +0,0 @@
import tkinter as tk
from PIL import Image, ImageTk
from X1 import *
global Viewcanvas # 定义画布
global Root # 主窗口
global AllModelObj #网络对象
'''
编程16.5编制程序依据AllModelObj和AllModelConn数据结构产生如图16.2的输出界面
目的及编程说明读者通过编程16.5可理解卷积神经网络模型构建的输出界面数据结构及初始值参见编程16.1
'''
def create_instance():
    """Instantiate the nine network-node objects, bind each to a module global
    and collect them (in order) into AllModelObj."""
    global AllModelObj
    global DataSet, Conv, Pool, FullConn, Nonline, Classifier, Error, AjConv, AjFullconn
    # (class, id, type code, label, x, y) — ParaString is "." for every node
    specs = [
        (Data_Class, "DataSet1", 1, "数据集1", 120, 330),
        (Conv_Class, "Conv1", 2, "卷积1", 250, 330),
        (Pool_Class, "Pool1", 3, "最大池化1", 380, 330),
        (FullConn_Class, "FullConn1", 4, "全连接1", 510, 330),
        (Nonline_Class, "Nonline1", 5, "非线性函数1", 640, 330),
        (Classifier_Class, "Classifier1", 6, "分类1", 780, 330),
        (Error_Class, "Error1", 7, "误差计算1", 710, 124),
        (AjConv_Class, "AjConv1", 8, "卷积调整1", 250, 70),
        (AjFullconn_Class, "AjFullconn1", 9, "全连接调整1", 510, 120),
    ]
    AllModelObj = [cls(oid, code, label, ".", x, y)
                   for cls, oid, code, label, x, y in specs]
    (DataSet, Conv, Pool, FullConn, Nonline, Classifier,
     Error, AjConv, AjFullconn) = AllModelObj
def connect_class():
    """Create the ten ModelConn links and store their output() records in
    AllModelConn.  The forward chain uses type 1, the adjustment links type 2."""
    global AllModelConn
    # (line number, line type, source node, target node)
    links = [
        (1, 1, DataSet, Conv),
        (2, 1, Conv, Pool),
        (3, 1, Pool, FullConn),
        (4, 1, FullConn, Nonline),
        (5, 1, Nonline, Classifier),
        (6, 1, Classifier, Error),
        (7, 2, Error, AjFullconn),
        (8, 2, Error, AjConv),
        (9, 2, AjFullconn, FullConn),
        (10, 2, AjConv, Conv),
    ]
    AllModelConn = [ModelConn(num, kind, src.ObjID, dst.ObjID).output()
                    for num, kind, src, dst in links]
def element(path):
    """Load the icon at `path`, scale it to 60x50 and return a Tk-compatible image."""
    picture = Image.open(path)
    picture = picture.resize((60, 50))
    photo = ImageTk.PhotoImage(picture)
    # Keep a reference on the root window so Tk does not garbage-collect it
    Root.img = photo
    return photo
def window():
    """Create the main window, the white drawing canvas and its border."""
    global Root
    global Viewcanvas
    Root = tk.Tk()
    window_width = 900   # canvas width in pixels
    window_height = 550  # canvas height in pixels
    Root.title("神经网络可视化")
    Root.geometry("900x550")
    # White canvas filling the window
    Viewcanvas = tk.Canvas(Root, width=window_width, height=window_height, bg="white")
    Viewcanvas.pack()
    # Light-blue border rectangle just inside the canvas edge
    Viewcanvas.create_rectangle(5, 5, 895, 545, fill=None, outline="lightblue", width=2)
def connecting_lines(obj_x, obj_y, text, text_record, image):
    """Draw an element icon at (obj_x, obj_y) with its caption offset by
    text_record = (dx, dy)."""
    Viewcanvas.create_image(obj_x, obj_y, image=image)
    dx, dy = text_record
    Viewcanvas.create_text(obj_x + dx, obj_y + dy, text=text, font=("黑体", 14))
def conn_lines(start, end, index):
    """Draw an arrowed connection between two recorded element positions.

    index selects the flavour: 1 -> smooth, width 4 (forward links);
    0 -> straight, width 2 (adjustment links).
    """
    opts = dict(arrow=tk.LAST, arrowshape=(16, 20, 4), fill='lightblue',
                smooth=(False, True)[index], width=(2, 4)[index])
    if start[0] == end[0]:
        # Vertically aligned: straight drop, trimmed 30px at each icon
        points = (start[0], start[1] + 30, end[0], end[1] - 30)
    elif start[1] == end[1]:
        # Horizontally aligned: straight run, trimmed 30px at each icon
        points = (start[0] + 30, start[1], end[0] - 30, end[1])
    elif abs(start[0] - end[0]) > abs(start[1] - end[1]):
        # Mostly horizontal: elbow through the x-midpoint at target height
        points = (start[0] - 15, start[1], int((start[0] + end[0]) / 2), end[1],
                  end[0] + 30, end[1])
    else:
        # Mostly vertical: rise/drop first, then run across to the target
        points = (start[0], start[1] - 20, start[0], end[1], end[0] + 30, end[1])
    Viewcanvas.create_line(*points, **opts)
def creating_elements():
    """Draw every object in AllModelObj and record its position and id so
    ligature() can resolve connections later."""
    # Caption offsets: above (error nodes), below (regular), left (adjusters)
    text_record = [(0, -50), (0, 50), (-80, 0)]
    for obj in AllModelObj:
        obj_x, obj_y = obj.ObjX, obj.ObjY
        Item_Record.append((obj_x, obj_y))
        Item_Name.append(obj.ObjID)
        if 'Error' in obj.ObjID:
            offset, img = text_record[0], list_image[obj.ObjType - 1]
        elif 'Aj' in obj.ObjID:
            offset, img = text_record[2], list_image[-1]
        else:
            offset, img = text_record[1], list_image[obj.ObjType - 1]
        connecting_lines(obj_x, obj_y, obj.ObjLable, offset, img)
def ligature():
    """Draw the connection arrows listed in AllModelConn between the elements
    previously recorded by creating_elements()."""
    for conn in AllModelConn:
        # conn layout: (number, type, source id, target id)
        src = Item_Name.index(conn[2])
        dst = Item_Name.index(conn[3])
        style = 1 if conn[1] == 1 else 0  # forward links: smooth and thick
        conn_lines(Item_Record[src], Item_Record[dst], style)
if __name__ == '__main__':
    # NOTE(review): `global` at module level is a no-op — names assigned here
    # are module globals already; kept as-is.
    global AllModelObj
    Item_Record = []  # element (x, y) positions, filled by creating_elements()
    Item_Name = []    # element ids, parallel to Item_Record
    window()           # build the Tk window and canvas
    create_instance()  # build the nine network objects (sets AllModelObj)
    connect_class()    # build the ten connection records (sets AllModelConn)
    # Icon files, indexed by element type - 1
    img_path = ["img/data.png", "img/conv.png", "img/pool.png", "img/full_connect.png", "img/nonlinear.png",
                "img/classifier.png", "img/error.png", "img/adjust.png"]
    list_image = []
    for path in img_path:
        list_image.append(element(path))  # load and keep each icon
    creating_elements()  # draw elements and record positions/ids
    ligature()           # draw the connection arrows
    Root.mainloop()
    # print(Item_Record)

@ -1,128 +0,0 @@
import tkinter as tk
from PIL import Image, ImageTk
from X1 import *
def create_instance():
    """Instantiate the nine network-node objects, bind each to a module global
    and collect them (in order) into AllModelObj."""
    global AllModelObj
    global DataSet, Conv, Pool, FullConn, Nonline, Classifier, Error, AjConv, AjFullconn
    # (class, id, type code, label, x, y) — ParaString is "." for every node
    specs = [
        (Data_Class, "DataSet1", 1, "数据集1", 120, 330),
        (Conv_Class, "Conv1", 2, "卷积1", 250, 330),
        (Pool_Class, "Pool1", 3, "最大池化1", 380, 330),
        (FullConn_Class, "FullConn1", 4, "全连接1", 510, 330),
        (Nonline_Class, "Nonline1", 5, "非线性函数1", 640, 330),
        (Classifier_Class, "Classifier1", 6, "分类1", 780, 330),
        (Error_Class, "Error1", 7, "误差计算1", 710, 124),
        (AjConv_Class, "AjConv1", 8, "卷积调整1", 250, 70),
        (AjFullconn_Class, "AjFullconn1", 9, "全连接调整1", 510, 120),
    ]
    AllModelObj = [cls(oid, code, label, ".", x, y)
                   for cls, oid, code, label, x, y in specs]
    (DataSet, Conv, Pool, FullConn, Nonline, Classifier,
     Error, AjConv, AjFullconn) = AllModelObj
def connect_class():
    """Create the ten ModelConn links and store their output() records in
    AllModelConn.  The forward chain uses type 1, the adjustment links type 2."""
    global AllModelConn
    # (line number, line type, source node, target node)
    links = [
        (1, 1, DataSet, Conv),
        (2, 1, Conv, Pool),
        (3, 1, Pool, FullConn),
        (4, 1, FullConn, Nonline),
        (5, 1, Nonline, Classifier),
        (6, 1, Classifier, Error),
        (7, 2, Error, AjFullconn),
        (8, 2, Error, AjConv),
        (9, 2, AjFullconn, FullConn),
        (10, 2, AjConv, Conv),
    ]
    AllModelConn = [ModelConn(num, kind, src.ObjID, dst.ObjID).output()
                    for num, kind, src, dst in links]
class Networking:
    """Tk front-end: draws the network elements and the arrows between them."""
    def __init__(self):
        self.Root = tk.Tk()  # main application window
        # Canvas dimensions in pixels
        self.window_width = 900
        self.window_height = 550
        self.list_image = []  # element icons, indexed by ObjType - 1
        self.Item_Record = [[], []]  # [0]: element (x, y) positions, [1]: element ids
    def window(self):
        """Create the canvas inside the main window and draw its border."""
        self.Root.title("神经网络可视化")
        self.Root.geometry("900x550")  # window size and position
        self.Viewcanvas = tk.Canvas(self.Root, width=self.window_width, height=self.window_height, bg="white")
        self.Viewcanvas.pack()
        # Light-blue border rectangle just inside the canvas edge
        self.Viewcanvas.create_rectangle(5, 5, 895, 545, fill=None, outline="lightblue", width=2)
    def connecting_lines(self, obj):
        """Draw one element's icon and caption; the caption offset depends on
        the element kind (Error: above, Aj*: left, others: below)."""
        obj_x = obj.ObjX
        obj_y = obj.ObjY
        text = obj.ObjLable
        if 'Error' in obj.ObjID:
            x, y = 0, -50
        elif 'Aj' in obj.ObjID:
            x, y = -80, 0
        else:
            x, y = 0, 50
        self.Viewcanvas.create_image(obj_x, obj_y, image=self.list_image[obj.ObjType - 1])
        self.Viewcanvas.create_text(obj_x + x, obj_y + y, text=text, font=("黑体", 14))
    def conn_lines(self, conn):
        """Draw the arrowed line for one connection record
        (number, type, source id, target id); type 1 lines are smooth/thick."""
        starting = self.Item_Record[1].index(conn[2])
        ending = self.Item_Record[1].index(conn[3])
        smooth = [False, True]
        width = [2, 4]
        start, end = self.Item_Record[0][starting], self.Item_Record[0][ending]
        index = 1 if conn[1] == 1 else 0
        if start[0] == end[0]:
            # Vertically aligned: straight segment trimmed at both icons
            self.Viewcanvas.create_line(start[0], start[1] + 30, end[0], end[1] - 30, arrow=tk.LAST,
                                        arrowshape=(16, 20, 4), fill='lightblue', smooth=smooth[index], width=width[index])
        elif start[1] == end[1]:
            # Horizontally aligned: straight segment trimmed at both icons
            self.Viewcanvas.create_line(start[0] + 30, start[1], end[0] - 30, end[1], arrow=tk.LAST,
                                        arrowshape=(16, 20, 4), fill='lightblue', smooth=smooth[index], width=width[index])
        else:
            if abs(start[0] - end[0]) > abs(start[1] - end[1]):
                # Mostly horizontal: elbow through the x-midpoint at target height
                self.Viewcanvas.create_line(start[0] - 15, start[1], int((start[0] + end[0]) / 2), end[1], end[0] + 30,
                                            end[1], arrow=tk.LAST,
                                            arrowshape=(16, 20, 4), fill='lightblue', smooth=smooth[index],
                                            width=width[index])
            else:
                # Mostly vertical: rise first, then run across to the target
                self.Viewcanvas.create_line(start[0], start[1] - 20, start[0], end[1], end[0] + 30, end[1], arrow=tk.LAST,
                                            arrowshape=(16, 20, 4), fill='lightblue', smooth=smooth[index],
                                            width=width[index])
    def creating_elements(self, AllModelObj=None):
        """Draw every element and record its position/id for later line drawing.

        BUGFIX: the original iterated a never-assigned self.AllModelObj and
        appended (x, y) tuples directly onto self.Item_Record — which is a
        two-slot [positions, ids] list — so conn_lines() could not resolve
        anything afterwards.  It now mirrors visual_output(): positions go to
        Item_Record[0], ids to Item_Record[1].  An object list may be passed
        in; otherwise self.AllModelObj is used when present.
        """
        objs = AllModelObj if AllModelObj is not None else getattr(self, 'AllModelObj', [])
        for obj in objs:
            self.Item_Record[0].append((obj.ObjX, obj.ObjY))
            self.Item_Record[1].append(obj.ObjID)
            self.connecting_lines(obj)
    def element(self, path):
        """Load one icon file, scale it to 60x50 and return a Tk PhotoImage."""
        img = Image.open(path)
        img = img.resize((60, 50))
        img = ImageTk.PhotoImage(img)
        self.Root.img = img  # keep a reference so Tk does not garbage-collect it
        return img
    def read_element(self):
        """Load all element icons into self.list_image (the adjust icon is
        listed twice, once per adjuster type)."""
        img_path = ["img/data.png", "img/conv.png", "img/pool.png", "img/full_connect.png", "img/nonlinear.png",
                    "img/classifier.png", "img/error.png", "img/adjust.png", "img/adjust.png"]
        for path in img_path:
            self.list_image.append(self.element(path))
    def visual_output(self, AllModelObj, AllModelConn):
        """Draw the whole diagram: every element, then every connection."""
        for obj in AllModelObj:
            self.Item_Record[0].append((obj.ObjX, obj.ObjY))  # record position
            self.Item_Record[1].append(obj.ObjID)  # record element id
            self.connecting_lines(obj)
        for conn in AllModelConn:
            self.conn_lines(conn)
if __name__ == '__main__':
    create_instance()  # build the network objects (sets AllModelObj)
    connect_class()    # build the connection records (sets AllModelConn)
    Net = Networking()
    Net.window()        # create the window and canvas
    Net.read_element()  # load the icon images
    Net.visual_output(AllModelObj, AllModelConn)  # draw elements and arrows
    Net.Root.mainloop()

@ -1,586 +0,0 @@
import os
import cv2
import numpy as np
import tkinter as tk
from tkinter import filedialog
from PIL import Image
from X1 import image_to_array
'''
编程16.6编制程序设置加载数据集的参数SetLoadData()及加载数据集LoadData()
目的及编程说明读者通过编程16.6可掌握读取文件及数据预处理的相关方法SetLoadData()设置加载数据集的相关参数
LoadData()按所设置的参数读取数据集SetDataPara函数实现的具体要求为
1确定参数信息如数据集路径信息图片大小每批次读入图片的数量等
2返回DataPara参数
LoadData函数实现的具体要求为
1根据路径信息读入图片
2将图片缩放为固定大小
3将图片转换为数组的形式
4将数组中具体数值归一化
5返回转换后的数组
'''
# 定义一个函数,实现图片转换为数组的功能
def image_to_array(path,height,width):
img = Image.open(path).convert("L") #转换为灰度模式
img = img.resize((height, width)) # 将图片缩放为height*width的固定大小
data = np.array(img) # 将图片转换为数组格式
data = data / 255.0 # 将数组中的数值归一化除以255
return data
def setload_data():
    """Return the data-loading parameters (dataset folders, image size and
    batch size) packed into a dict."""
    DataPara = {
        "train_imgPath": 'data/train/',  # training-set folder
        "test_imgPath": 'data/verify/',  # test-set folder
        "img_width": 48,                 # target image width
        "img_height": 48,                # target image height
        "batch_size": 32,                # images per batch
    }
    return DataPara
# if __name__ == '__main__':
# root = tk.Tk()
# root.withdraw()
# DataPara = setload_data()
# print(DataPara)
# 定义加载数据集load_data()
def load_data(DataPara):
    """Load the train and test image sets described by DataPara.

    Reads every image file in DataPara["train_imgPath"] and
    DataPara["test_imgPath"] (sorted numerically by bare file name), converts
    each one to a normalised, transposed array via image_to_array(), and
    returns the pair (train_images, test_images) as numpy arrays.
    """
    listimages = []
    list_path = []
    for key in ("train_imgPath", "test_imgPath"):
        folder = DataPara[key]
        Path = []
        for tfname in os.listdir(folder):
            full = os.path.join(folder, tfname)
            # BUGFIX: the original called os.path.isdir(tfname), which tests
            # the bare name against the current working directory rather than
            # the dataset folder, so subdirectories were not skipped reliably.
            if not os.path.isdir(full):
                Path.append(full)
        # BUGFIX: sort numerically on the bare file name; the original split
        # on "/" which breaks on Windows, where os.path.join inserts "\\".
        file_paths = sorted(Path, key=lambda p: int(os.path.splitext(os.path.basename(p))[0]))
        list_path.append(file_paths)
    for data_path in list_path:
        images = []
        for path in data_path:
            img = image_to_array(path, DataPara["img_width"], DataPara["img_height"])
            images.append(img.T)  # transpose: swap image rows and columns
        listimages.append(np.array(images))
    return listimages[0], listimages[1]
# if __name__ == '__main__':
# root = tk.Tk()
# root.withdraw()
# DataPara = setload_data()
# train_images, test_images = load_data(DataPara)
# print(train_images.shape)
# print(test_images.shape)
'''
编程16.7编制程序设置卷积参数SetConvPara()及卷积函数ConvProc()
目的及编程说明读者通过编程16.7可实现卷积神经网络中的重要步骤卷积它将一个卷积核应用于输入图像
以提取特征SetConvPara()设置相关参数ConvProc()按相关参数完成卷积计算
SetConvPara ()实现的具体要求为
1确定参数信息卷积核大小步长填充
2返回ConvPara参数
ConvProc() 实现的具体要求为
1获取输入数据的大小
2获取卷积核的大小
3计算输出数据的大小
4对于每一个输出位置进行卷积操作
5返回卷积计算后的数组特征向量
'''
# 定义设置卷积参数的函数SetConvPara()
def setconv_para():
kernel_h = 3 # 设置卷积核大小这里假设是3x3
kernel_w = 3
kernel = [[1.289202, -1.471377, -0.238452],# 卷积核为3x3的单位矩阵(高斯分布)通常再乘上一个小的数比如0.1
[-0.562343, -0.019988, -0.441446],
[1.627381, 1.390266, 0.812486]]
stride = 1 # 设置步长这里假设是1
padding = 0 # 设置填充这里假设是0
ConvPara = {"kernel": kernel,# 返回ConvPara参数这里用一个字典来存储
"kernel_h": kernel_h,
"kernel_w": kernel_w,
"stride": stride,
"padding": padding}
return ConvPara
# if __name__ == '__main__':
# ConvPara = setconv_para()
# print(ConvPara)
def conv_proc(image, ConvPara):
    """Valid 2-D convolution (padding assumed 0) applied per channel.

    image: array of shape (channels, H, W).
    ConvPara: dict with "kernel", "kernel_h", "kernel_w", "stride".
    Returns an array of shape (channels, out_h, out_w).
    """
    c, image_h, image_w = image.shape
    kernel_h = ConvPara["kernel_h"]
    kernel_w = ConvPara["kernel_w"]
    kernel = np.asarray(ConvPara["kernel"])
    stride = ConvPara["stride"]
    out_h = (image_h - kernel_h) // stride + 1
    out_w = (image_w - kernel_w) // stride + 1
    output = np.zeros((c, out_h, out_w))
    for k in range(c):
        for i in range(out_h):
            for j in range(out_w):
                # BUGFIX: the original sliced a hard-coded 3x3 window
                # (i*stride:i*stride+3), which silently produced wrong results
                # for any kernel size other than 3x3.
                window = image[k,
                               i * stride:i * stride + kernel_h,
                               j * stride:j * stride + kernel_w]
                output[k, i, j] = np.sum(window * kernel)
    return output
# if __name__ == '__main__':
# root = tk.Tk()
# root.withdraw()
# DataPara = setload_data()
# train_images, test_images = load_data(DataPara)
# ConvPara = setconv_para()
# conv_images = [] # 存储卷积处理后的图片的列表
# for image in train_images: # 获取训练集的图片数据
# dim = len(image.shape) # 获取矩阵的维度
# if dim == 2: # 如果是二维矩阵,则转化为三维矩阵
# image_h, image_w = image.shape
# image = np.reshape(image, (1, image_h, image_w))
# output = conv_proc(image, ConvPara) # 调用ConvProc()函数根据ConvPara参数完成卷积计算
# conv_images.append(output) # 将卷积结果存储到列表
# conv_images = np.array(conv_images) # 将卷积处理后的图片列表转换为数组形式,方便后续处理
# print(conv_images)
def convolutional_operation(images):
    """Convolve every image with the global ConvPara kernel.

    2-D inputs are promoted to single-channel 3-D arrays first; 3-D inputs
    are convolved as-is (anything else is skipped, as before).
    """
    global ConvPara
    conv_images = []
    for image in images:
        if image.ndim == 2:
            # Promote (H, W) to (1, H, W) so conv_proc sees a channel axis
            conv_images.append(conv_proc(image[np.newaxis, :, :], ConvPara))
        elif image.ndim == 3:
            conv_images.append(conv_proc(image, ConvPara))
    return np.array(conv_images)
'''
编程16.8编制程序设置池化参数SetPoolPara()及池化函数PoolProc()
目的及编程说明读者通过编程16.8可掌握池化函数的基本方法池化的作用是在卷积神经网络的特征提取过程中
对卷积后提取到的数组进行降维处理从而降低特征数组的维度和计算量SetPoolPara()设置相关参数
PoolProc ()按相关参数完成池化处理
SetPoolPara()实现的具体要求为
1确定参数信息池化的类型Poolmode池化窗口大小pool_size步长stride
2返回PoolPara参数
PoolProc()实现的具体要求为
1获取输入数据的形状
2获取池化窗口的大小
3创建一个输出结果数组计算池化后的结果大小
4对每个池化窗口应用池化函数判断池化类型池化模式为'max'
则使用窗口中的最大值池化模式为'avg'则使用窗口中的平均值
池化模式为'min'则使用窗口中的最小值
5返回池化计算后的数组
'''
def setpool_para():
    """Return the pooling parameters: 2x2 max pooling with stride 2."""
    return {"pool_mode": "max", "pool_size": 2, "stride": 2}
# if __name__ == '__main__':
# PoolPara = setpool_para()
# print(PoolPara)
def pool_proc(image, PoolPara):
    """Pool each channel of `image` using the window/stride/mode in PoolPara.

    Supported modes: "max", "avg", "min"; any other mode raises ValueError
    when the first window is processed.
    """
    mode = PoolPara["pool_mode"]
    size = PoolPara["pool_size"]
    stride = PoolPara["stride"]
    reducers = {"max": np.max, "avg": np.mean, "min": np.min}
    c, h, w = image.shape
    out_h = int((h - size) / stride) + 1
    out_w = int((w - size) / stride) + 1
    out = np.zeros((c, out_h, out_w))
    for k in range(c):
        for i in range(out_h):
            for j in range(out_w):
                window = image[k, i * stride:i * stride + size, j * stride:j * stride + size]
                try:
                    out[k][i][j] = reducers[mode](window)
                except KeyError:
                    raise ValueError("Invalid pooling mode")
    return out
# if __name__ == '__main__':
# ···
# PoolPara = setpool_para()
# pool_images = [] # 存储池化处理后的图片的列表
# for image in conv_images: # 获取卷积后的图片数据
# output = pool_proc(image, PoolPara)
# pool_images.append(output) # 将池化结果存储到列表
# pool_images = np.array(pool_images) # 将池化处理后的图片列表转换为数组形式,方便后续处理
# print(pool_images)
def pooling_treatment(conv_images):
    """Pool every convolved image using the global PoolPara settings and
    return the results as one numpy array."""
    global PoolPara
    pooled = [pool_proc(image, PoolPara) for image in conv_images]
    return np.array(pooled)
'''
编程16.9编制程序设置全连接参数SetFullConnPara()及全连接函数FullConnProc()
目的及编程说明读者通过编程16.9可掌握全连接处理的基本方法全连接是将前面卷积和池化层提取到的数组转换为一个一维的数组
并将其传递给后面的分类器进行分类全连接层中的每个神经元都与前一层中的所有神经元相连
因此全连接层可以将前一层中提取的所有特征信息汇总到一个向量中为后续的分类任务提供更加丰富的特征表示
全连接层具有较高的参数数量和计算量SetFullConnPara()设置全连接相关参数FullConnProc()按相关参数完成全连接操作
SetFullConnPara()实现的具体要求为
1确定参数信息: 权重矩阵偏置向量
2返回FullConnPara参数
FullConnProc() 实现的具体要求为
1对输入进行展平处理变换为单通道的二维数组格式如果输入数据为三通道需要将其转换为单通道的二维数组形式
2计算全连接层的线性变换inputdata与权重矩阵w进行乘法再加上偏置向量b
3返回全连接计算后的数组
'''
def setfullconn_para(data):
    """Derive fully-connected layer parameters from a sample pooled image.

    The weight matrix maps the flattened (height * width) input to 10 outputs;
    weights and bias are drawn at random.
    """
    c, height, width = data.shape
    num_outputs = 10  # size of the layer's output vector
    return {
        "weights": np.random.randn(num_outputs, height * width) * 0.1,
        "bias": np.random.randn(1, num_outputs),
        "num_outputs": num_outputs,
    }
def fullconn_proc(inputdata, FullConnPara):
    """Flatten `inputdata` (shape (1, H, W)) and apply the linear map
    x @ W.T + b using the weights and bias in FullConnPara."""
    w = FullConnPara["weights"]
    b = FullConnPara["bias"]
    flat = inputdata.reshape(1, inputdata.shape[1] * inputdata.shape[2])
    return flat.dot(w.T) + b
# if __name__ == '__main__':
# ···
# FullConnPara = setfullconn_para(pool_images[0])
# fullconn_images = [] # 存储全连接处理后的图片的列表
# for image in pool_images: # 获取池化后的图片数据
# output = fullconn_proc(image, FullConnPara)
# fullconn_images.append(output) # 将全连接处理后的结果存储到列表
# fullconn_images = np.array(fullconn_images) # 将全连接处理后的图片列表转换为数组形式,方便后续处理
def fullyconnected_operation(pool_images):
    """Apply the global fully-connected layer to every pooled image and
    return the outputs as one numpy array."""
    global FullConnPara
    outputs = [fullconn_proc(image, FullConnPara) for image in pool_images]
    return np.array(outputs)
'''
编程16.10编制程序设置非线性参数SetNonLPara()及非线性函数NonlinearProc()
目的及编程说明读者通过编程16.10可掌握非线性函数使用的基本方法它的主要作用是引入非线性特性从而使网络能够学习更加复杂的特征和模式
通过激活函数将线性组合的输出进行非线性变换使得网络可以处理非线性关系和非线性变换从而更好地适应数据的复杂性
主要包含的类型常用的非线性函数包括ReLUtanh和sigmoid等非线性函数的输入数据是特征向量非线性函数应该返回应用激活函数后的特征向量
SetNonLPara()设置相关参数NonlinearProc()按相关参数完成对输入数据的非线性处理操作
SetNonLPara()实现的具体要求为
1确定参数信息:非线性函数的类型
2返回NonLPara参数
NonlinearProc(inputdataNonLPara) 实现的具体要求为
(1)判断NonLPara进行相应的计算Sigmoid\Relu\Tanh;
(2)返回计算后的值
'''
def setnonl_para():
    """Return the activation choice; one of "Sigmoid", "ReLU" or "Tanh"."""
    return {"nonlinearmode": "ReLU"}
def nonlinear_proc(inputdata, NonLPara):
    """Apply the activation selected by NonLPara["nonlinearmode"] element-wise.

    Raises ValueError for an unknown mode.
    """
    mode = NonLPara["nonlinearmode"]
    if mode == "Sigmoid":
        return 1 / (1 + np.exp(-inputdata))  # squashes values into (0, 1)
    if mode == "ReLU":
        return np.maximum(inputdata, 0)      # clamps negatives to 0
    if mode == "Tanh":
        return np.tanh(inputdata)            # squashes values into (-1, 1)
    raise ValueError("Invalid nonlinear mode")
# if __name__ == '__main__':
# ···
# NonLPara = setnonl_para()
# nonlinear_images = []
# for image in fullconn_images: # 获取全连接处理后的图片数据
# output = nonlinear_proc(image, NonLPara)
# nonlinear_images.append(output) # 将非线性处理后的结果存储到列表
# nonlinear_images = np.array(nonlinear_images) # 将非线性处理后的图片列表转换为数组形式,方便后续处理
def activation(fullconn_images):
    """Apply the global non-linearity to every fully-connected output and
    return the results as one numpy array."""
    global NonLPara
    results = [nonlinear_proc(image, NonLPara) for image in fullconn_images]
    return np.array(results)
'''
编程16.11编制程序设置分类函数参数SetClassifyPara()及分类函数ClassifierProc()
目的及编程说明读者通过编程16.11可掌握输出分类函数的基本方法
SetClassifyPara()设置相关参数ClassifierProc ()按相关参数完成对输入数据的分类标签设置
SetClassifyPara()实现的具体要求为
1设定阈值
2返回ClassifyPara参数
ClassifierProc()实现的具体要求为
1找到概率超过阈值的标签分类就是分类结果
2返回分类标签
'''
def setclassify_para():
    """Return the classification parameters: the probability threshold."""
    return {"threshold": 0.1}
def classifier_proc(inputdata, ClassifyPara):
    """Classify a (1, n) score vector.

    Softmaxes the scores and returns the 1-based index of the highest
    probability, provided it reaches ClassifyPara["threshold"]; returns -1
    when no class reaches the threshold.  The caller's array is not modified.
    """
    def softmax(x):
        # BUGFIX: out-of-place shift; the original `x -= np.max(x)` mutated
        # the caller's array in place.
        shifted = x - np.max(x)  # subtract the max for numerical stability
        return np.exp(shifted) / np.sum(np.exp(shifted))

    threshold = ClassifyPara["threshold"]
    prob = softmax(inputdata)
    prob1 = prob[prob >= threshold]
    if prob1.size == 0:
        # BUGFIX: the original crashed on max([]) when no probability reached
        # the threshold; return the sentinel -1 (as its unused initialiser
        # `output = -1` hinted).
        return -1
    index = np.where(prob == max(prob1))
    return index[1].item(0) + 1
# if __name__ == '__main__':
# ···
# classifier_images = [] # 存储分类处理后的图片的列表
# prob_images = [] # 存储分类处理后的概率向量
# for image in nonlinear_images: # 获取非线性处理后的图片数据
# def softmax(x): # 定义softmax函数
# x -= np.max(x) # 减去最大值,防止数值溢出
# return np.exp(x) / np.sum(np.exp(x)) # 计算指数和归一化
# prob = softmax(image) # 调用softmax函数得到概率分布向量
# prob_images.append(prob) # 将概率向量结果存储到列表
# classifypara = setclassify_para()
# output = classifier_proc(image, classifypara) # 进行分类处理
# classifier_images.append(output) # 将分类结果存储到列表
# classifier_images = np.array(classifier_images) # 将分类的结果列表转换为数组形式,方便后续处理
def classify(nonlinear_images):
    """Softmax and threshold-classify every activation vector.

    Returns (classifier_images, prob_images): the predicted 1-based labels as
    a numpy array, and the per-image probability vectors as a list.
    """
    global ClassifyPara

    def softmax(x):
        # BUGFIX: out-of-place shift; the original `x -= np.max(x)` mutated the
        # entries of nonlinear_images in place.  The classification result is
        # unchanged because softmax is invariant under a constant shift.
        shifted = x - np.max(x)
        return np.exp(shifted) / np.sum(np.exp(shifted))

    classifier_images = []  # predicted class labels
    prob_images = []        # probability vectors, parallel to the labels
    for image in nonlinear_images:
        prob_images.append(softmax(image))
        classifier_images.append(classifier_proc(image, ClassifyPara))
    return np.array(classifier_images), prob_images
'''
编程16.12编制程序设置标签类别SetLabelPara()及为样本标记标签函数LabelProc()
目的及编程说明读者通过编程16.12为样本标记标签的过程SetLabelPara()实现的具体要求为
设置标签类别列表将标签列表转化为one-hot向量的形式
LabelProc()实现的具体要求为
1读取样本数据集遍历每个样本
2读取标签列表
3将样本和对应的标签组成元组返回标记好标签的样本列表
'''
def setlabel_para():
    """Return (label_list, one_hot_matrix) for the ten classes 1..10."""
    label_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    # Identity matrix: row i is the one-hot vector for label_list[i]
    return label_list, np.eye(len(label_list))
def label_proc(nonlinear_images, prob_images, classifier_images):
    """Pair each sample's probability vector with the one-hot encoding of its
    predicted class, using the global LabelPara tables.

    Returns a list of (features, one_hot_label) tuples.
    """
    global LabelPara
    # One row per sample: the probability vector with its class label appended
    rows = [np.append(prob_images[i], classifier_images[i])
            for i in range(nonlinear_images.shape[0])]
    sampledata = np.array(rows)
    label_list, one_hot_matrix = LabelPara
    labeled_samples = []
    for sample in sampledata:
        features = sample[:-1]    # probability part of the row
        class_value = sample[-1]  # predicted label stored in the last column
        # Look up the one-hot row for this class via its index in label_list
        one_hot = np.take(one_hot_matrix, label_list.index(class_value), axis=0)
        labeled_samples.append((features, one_hot))
    return labeled_samples
# if __name__ == '__main__':
# ···
# classifier_images = np.array(classifier_images) # 将分类的结果列表转换为数组形式,方便后续处理
# labeled_samples = label_proc(nonlinear_images, prob_images, classifier_images)
# print(labeled_samples)
'''
编程16.13编制程序设置误差参数SetErrorPara()及计算误差函数ErrorProc()
目的及编程说明读者通过编程20.13可掌握计算误差的基本方法
计算误差的函数损失函数通常有交叉熵Cross Entropy ErrorCEE
均方误差Mean Squared ErrorMSE
平均绝对误差Mean Absolute ErrorMAE
使用损失函数计算预测值与真实值之间的误差SetErrorPara()设置相关参数ErrorProc()按相关参数完成误差相关的计算
SetErrorPara()实现的具体要求为1确定参数信息: 标签类别损失函数类型
2返回ErrorProc参数
ErrorProc() 实现的具体要求为 1将真实标签类别label转换为one-hot编码形式
2确定损失函数类别实现不同的损失函数计算输入值与label之间的误差
3返回误差值loss
'''
def seterror_para():
    """Return (label_list, loss_type); the loss defaults to cross entropy ("CEE")."""
    return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "CEE"
def error_proc(input, label, ErrorPara):
    """Mean loss between probability matrix `input` and true classes `label`.

    ErrorPara is (label_list, loss_type) with loss_type one of "CEE", "MSE"
    or "MAE"; any other value raises ValueError.
    """
    label_list, loss_type = ErrorPara
    eye = np.eye(len(label_list))
    # One-hot encode the true labels via their positions in label_list
    positions = [label_list.index(l) for l in label]
    label_one_hot = np.take(eye, positions, axis=0)
    n = len(input)
    if loss_type == "CEE":  # cross entropy: -sum(y * log(p)) / n
        return -np.sum(label_one_hot * np.log(input)) / n
    if loss_type == "MSE":  # mean squared error: sum((p - y)^2) / n
        return np.sum((input - label_one_hot) ** 2) / n
    if loss_type == "MAE":  # mean absolute error: sum(|p - y|) / n
        return np.sum(np.abs(input - label_one_hot)) / n
    raise ValueError("Invalid loss type")
# if __name__ == '__main__':
# ···
# prob_images = np.squeeze(prob_images)
# input = prob_images
# array = np.arange(1, 11) # 定义一个一维数组包含1到10的整数
# right_label = np.random.choice(array, 900) # 从a中随机抽取100个元素有放回每个元素的概率相同
# ErrorPara = seterror_para() # 设置误差参数
# loss = error_proc(input, right_label, ErrorPara) # 计算误差值
# print(loss)
def loss_count(prob_images):
    """Evaluate the loss of the predicted probabilities against randomly drawn
    labels (demo only — these are not real ground-truth labels)."""
    global ErrorPara
    input = np.squeeze(prob_images)  # drop the singleton middle axis
    # Draw 900 pseudo ground-truth labels uniformly from 1..10, with replacement
    right_label = np.random.choice(np.arange(1, 11), 900)
    return error_proc(input, right_label, ErrorPara)
'''
编程16.14编制程序设置卷积核调整函数的参数SetAjConvPara()卷积核调整函数AjConvProc()
目的及编程说明读者通过编程20.14可掌握更新卷积核的基本方法反向传播到卷积层的输出对于卷积核的每一个权重
可以计算出它对误差的贡献然后使用梯度下降算法更新该权重SetAjConvPara()设置相关参数AjConvProc()按相关参数完成卷积核调整相关的计算
SetAjConvPara()实现的具体要求为:1确定卷积调整的参数卷积核信息学习率误差值
2返回参数AjConvPara4.028404024493524
AjConvProc()的具体实现要求为1使用误差值计算卷积层的误差项和梯度2梯度下降算法更新卷积层参数3返回新的卷积层参数
'''
def setajconv_para():
    """Return demo parameters for the conv-kernel adjustment step:
    a 2x2 kernel, learning rate, bias and a 2x2 error (loss) matrix."""
    return {
        'kernel_info': np.array([[1.0, 2.0], [3.0, 4.0]]),
        'learning_rate': 0.01,
        'bias': 0.5,
        'loss': np.array([[0.1, 0.2], [0.3, 0.4]]),
    }
def ajconv_proc(input, AjConvPara):
    """Gradient-descent update of the convolution kernel and bias.

    For each channel of `input`, accumulates a kernel gradient as the sum of
    180-degree-rotated 2x2 input windows weighted by the loss entries, averages
    over channels, then steps kernel and bias against their gradients.
    Returns (new_kernel, new_bias).
    """
    loss = AjConvPara['loss']
    lr = AjConvPara['learning_rate']
    bias_grad = np.sum(loss)  # bias gradient: sum of all error terms
    per_channel = []
    for channel in input:
        grad = np.zeros_like(AjConvPara['kernel_info'])
        for i in range(loss.shape[0]):
            for j in range(loss.shape[1]):
                # 180-degree-rotated window times the matching loss entry
                grad += np.rot90(channel[i:i + 2, j:j + 2], 2) * loss[i, j]
        per_channel.append(grad)
    kernel_grad = np.sum(np.stack(per_channel, axis=0), axis=0) / len(input)
    new_kernel = AjConvPara['kernel_info'] - lr * kernel_grad
    new_bias = AjConvPara['bias'] - lr * bias_grad
    return new_kernel, new_bias
# if __name__ == '__main__':
# input = np.array([[[1,2,3],[4,5,6],[7,8,9]],
# [[10,11,12],[13,14,15],[16,17,18]],
# [[19,20,21],[22,23,24],[25,26,27]],
# [[28,29,30],[31,32,33],[34,35,36]]])
# AjConvPara=setajconv_para()
# kernel_new, bias_new = ajconv_proc(input, AjConvPara)
# print("\nkernel_new =\n", kernel_new,"\n")
# print("bias_new =", bias_new)
def setajfc_para():
    """Return demo parameters for the fully-connected adjustment step:
    weights, bias, learning rate and an error vector."""
    return {
        'weights': np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]),
        'bias': np.array([0.5, 0.6]),
        'learning_rate': 0.01,
        'error': np.array([0.001, 0.002]),
    }
def ajfullconn_proc(AjFCPara):
    """Gradient-descent update of the fully-connected weights and bias.

    NOTE(review): the weight gradient is np.outer(error, learning_rate) — a
    column vector broadcast across all weight columns; normally it would also
    involve the layer input.  Preserved as-is; confirm against the caller.
    Mutates AjFCPara in place and returns it.
    """
    err = AjFCPara['error']
    lr = AjFCPara['learning_rate']
    AjFCPara['weights'] -= np.outer(err, lr)  # broadcast down each weight row
    AjFCPara['bias'] -= err * lr
    return AjFCPara
# if __name__ == '__main__':
# AjFCPara = setajfc_para()# 调用SetAjFCPara()设置参数
# print("初始化的全连接层参数:", AjFCPara)
# newajfcpara = ajfullconn_proc(AjFCPara)# 调用AjFullconnProc()更新全连接层参数
# print("更新后的全连接层参数:", newajfcpara)
def parameter():
    """Initialise every pipeline stage's parameter dictionary in module globals."""
    global DataPara, ConvPara, PoolPara, NonLPara, ClassifyPara, LabelPara, ErrorPara
    DataPara = setload_data()          # dataset paths / image size / batch size
    ConvPara = setconv_para()          # convolution kernel and stride
    PoolPara = setpool_para()          # pooling window, stride and mode
    NonLPara = setnonl_para()          # activation choice
    ClassifyPara = setclassify_para()  # classification threshold
    LabelPara = setlabel_para()        # label list + one-hot matrix
    ErrorPara = seterror_para()        # loss configuration
def main():
    """Run one forward pass of the pipeline over the training set and print the loss."""
    # Create a hidden tkinter root window (so any file dialogs can be shown).
    root = tk.Tk()
    root.withdraw()
    parameter()  # populate the global parameter dicts
    global DataPara
    train_images, test_images = load_data(DataPara)  # load train/test image arrays
    conv_images = convolutional_operation(train_images)  # convolution stage
    pool_images = pooling_treatment(conv_images)  # pooling stage
    global FullConnPara
    # FC parameters depend on the pooled feature-map shape, so they are
    # derived from the first pooled sample.
    FullConnPara = setfullconn_para(pool_images[0])
    fullconn_images = fullyconnected_operation(pool_images)  # fully-connected stage
    nonlinear_images = activation(fullconn_images)  # activation stage
    classifier_images, prob_images = classify(nonlinear_images)  # classification stage
    # Attach labels: one sample per row, the class label in the last column.
    labeled_samples = label_proc(nonlinear_images, prob_images, classifier_images)
    loss = loss_count(prob_images)  # loss over the softmax probabilities
    # Print the result
    print(loss)
if __name__ == '__main__':
    main()

@ -1,485 +0,0 @@
import os
import cv2
import numpy as np
import tkinter as tk
from tkinter import filedialog
from read_data import *
from PIL import Image
class ModelObj:  # Base class for every element in the network diagram
    """Holds the identity, parameter string and canvas position shared by all
    model objects ('Lable' spelling kept — subclasses rely on it)."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        # Store each constructor argument verbatim as an attribute.
        for attr, value in (("ObjID", ObjID),          # element id
                            ("ObjType", ObjType),      # element category code
                            ("ObjLable", ObjLable),    # display label
                            ("ParaString", ParaString),  # parameter string
                            ("ObjX", ObjX),            # x coordinate on the canvas
                            ("ObjY", ObjY)):           # y coordinate on the canvas
            setattr(self, attr, value)
class Data_Class(ModelObj):  # Dataset network object
    """Loads the train/test image folders as normalised grayscale arrays."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.LoadData = self.load_data       # basic operation function
        self.SetDataPara = self.SetLoadData  # parameter-setup function

    def load_data(self, DataPara):
        """Read every image under the configured train/test folders.

        DataPara: dict with keys "train_imgPath", "test_imgPath",
        "img_width", "img_height" (see SetLoadData).
        Returns (train_images, test_images) as numpy arrays of transposed,
        normalised grayscale images; also publishes the train sub-folder
        names through the module-level global SubFolders.
        """
        global SubFolders
        listimages = []  # [train array, test array]
        list_path = []
        # read_folders() comes from read_data (wildcard import); it returns
        # (sub-folder names, list of image file paths).
        SubFolders, train_Path = read_folders(DataPara["train_imgPath"])
        list_path.append(train_Path)
        _, path_list = read_folders(DataPara["test_imgPath"])
        list_path.append(path_list)
        for data_path in list_path:
            images = []  # images of the current split
            for path in data_path:
                img = self.image_to_array(path, DataPara["img_width"], DataPara["img_height"])
                img = img.T  # transpose: swap image rows and columns
                images.append(img)
            listimages.append(np.array(images))
        return listimages[0], listimages[1]

    def SetLoadData(self):
        """Return the dataset-loading parameters as a dict."""
        train_imgPath = 'data_classification/train/'  # training-set folder
        test_imgPath = 'data_classification/test/'    # test-set folder
        img_width = 48   # target image width in pixels
        img_height = 48  # target image height in pixels
        batch_size = 32  # images per batch
        DataPara = {"train_imgPath": train_imgPath,
                    "test_imgPath": test_imgPath,
                    "img_width": img_width,
                    "img_height": img_height,
                    "batch_size": batch_size}
        return DataPara

    def image_to_array(self, path, width, height):
        """Load one image file as a grayscale array normalised to [0, 1].

        Fix: the parameters were previously named (height, width) although
        the caller passes (img_width, img_height) and PIL's resize() expects
        a (width, height) tuple; the names now match the actual usage
        (behaviour unchanged — both dimensions are 48 in this project).
        """
        img = Image.open(path).convert("L")  # "L" = 8-bit grayscale
        img = img.resize((width, height))    # PIL resize takes (width, height)
        data = np.array(img)
        data = data / 255.0  # scale pixel values into [0, 1]
        return data

    def output(self):
        """Return this element's fields and bound operations as a list."""
        result = [self.ObjID, self.ObjType, self.ObjLable, self.LoadData,
                  self.SetDataPara, self.ParaString, self.ObjX, self.ObjY]
        return result
# if __name__ == '__main__':
# DataSet = Data_Class("DataSet1", 1, "数据集1", ".", 120, 330)
# print(DataSet)
class Conv_Class(ModelObj):  # Convolution object
    """Single-kernel valid (no-padding) 2-D convolution plus its parameter factory."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.ConvProc = self.conv_proc        # basic operation function
        self.SetConvPara = self.setconv_para  # parameter-setup function

    def conv_proc(self, image, ConvPara):
        """Convolve a (c, h, w) image with one kernel; returns (c, out_h, out_w).

        Fix: the window slice previously hard-coded a 3x3 extent
        (``+ 3``), silently producing wrong sums for any other kernel size;
        it now uses kernel_h / kernel_w from ConvPara (identical result for
        the default 3x3 kernel).
        """
        c, image_h, image_w = image.shape
        kernel_h = ConvPara["kernel_h"]
        kernel_w = ConvPara["kernel_w"]
        kernel = ConvPara["kernel"]
        stride = ConvPara["stride"]
        out_h = (image_h - kernel_h) // stride + 1  # valid-convolution output size
        out_w = (image_w - kernel_w) // stride + 1
        output = np.zeros((c, out_h, out_w))
        for k in range(c):  # each channel is convolved independently
            for i in range(out_h):
                for j in range(out_w):
                    window = image[k,
                                   i * stride:i * stride + kernel_h,
                                   j * stride:j * stride + kernel_w]
                    output[k, i, j] = np.sum(window * kernel)
        return output

    def setconv_para(self):
        """Return the convolution hyper-parameters (3x3 kernel, stride 1, no padding)."""
        kernel_h = 3
        kernel_w = 3
        kernel = [[1.289202, -1.471377, -0.238452],  # fixed 3x3 kernel values
                  [-0.562343, -0.019988, -0.441446],
                  [1.627381, 1.390266, 0.812486]]
        stride = 1
        padding = 0  # NOTE(review): stored but never applied by conv_proc — confirm
        ConvPara = {"kernel": kernel,
                    "kernel_h": kernel_h,
                    "kernel_w": kernel_w,
                    "stride": stride,
                    "padding": padding}
        return ConvPara

    def output(self):
        """Return this element's fields and bound operations as a list."""
        result = [self.ObjID, self.ObjType, self.ObjLable, self.ConvProc,
                  self.SetConvPara, self.ParaString, self.ObjX, self.ObjY]
        return result
class Pool_Class(ModelObj):  # Pooling object
    """2-D pooling (max / avg / min) applied channel-by-channel."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.MaxPoolProc = self.pool_proc     # basic operation function
        self.SetPollPara = self.setpool_para  # parameter-setup function

    def pool_proc(self, image, PoolPara):
        """Pool a (c, h, w) array window-by-window; returns (c, out_h, out_w)."""
        mode = PoolPara["pool_mode"]
        size = PoolPara["pool_size"]
        stride = PoolPara["stride"]
        c, h, w = image.shape
        out_h = int((h - size) / stride) + 1  # output height
        out_w = int((w - size) / stride) + 1  # output width
        out = np.zeros((c, out_h, out_w))
        reducers = {"max": np.max, "avg": np.mean, "min": np.min}  # mode dispatch
        for ch in range(c):
            for r in range(out_h):
                for col in range(out_w):
                    win = image[ch,
                                r * stride:r * stride + size,
                                col * stride:col * stride + size]
                    fn = reducers.get(mode)
                    if fn is None:  # unknown pooling type
                        raise ValueError("Invalid pooling mode")
                    out[ch][r][col] = fn(win)
        return out

    def setpool_para(self):
        """Return the pooling hyper-parameters: 2x2 max pooling, stride 2."""
        return {"pool_mode": "max", "pool_size": 2, "stride": 2}

    def output(self):
        """Return this element's fields and bound operations as a list."""
        result = [self.ObjID, self.ObjType, self.ObjLable, self.MaxPoolProc,
                  self.SetPollPara, self.ParaString, self.ObjX, self.ObjY]
        return result
class FullConn_Class(ModelObj):  # Fully-connected object
    """Dense layer: flatten the input and apply x @ W.T + b."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.FullConnProc = self.fullconn_proc        # basic operation function
        self.SetFullConnPara = self.setfullconn_para  # parameter-setup function

    def fullconn_proc(self, inputdata, FullConnPara):
        """Flatten the (c, h, w) input to one row and apply the linear map.

        NOTE(review): only shape[1] * shape[2] elements are kept, so this
        assumes a single-channel input — confirm upstream.
        """
        W = FullConnPara["weights"]  # (num_outputs, h*w) weight matrix
        b = FullConnPara["bias"]     # (1, num_outputs) bias row
        flat = inputdata.reshape(1, inputdata.shape[1] * inputdata.shape[2])
        return np.dot(flat, W.T) + b

    def setfullconn_para(self, data, num_outputs):
        """Build random FC parameters for a (c, height, width) input shape."""
        _, height, width = data  # the channel count is not used
        FullConnPara = {
            "weights": np.random.randn(num_outputs, height * width),
            "bias": np.random.randn(1, num_outputs),
            "num_outputs": num_outputs,
        }
        return FullConnPara

    def output(self):
        """Return this element's fields and bound operations as a list."""
        result = [self.ObjID, self.ObjType, self.ObjLable, self.FullConnProc,
                  self.SetFullConnPara, self.ParaString, self.ObjX, self.ObjY]
        return result
class Nonline_Class(ModelObj):  # Non-linearity object
    """Element-wise activation: Sigmoid, ReLU or Tanh."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.NonlinearProc = self.nonlinear_proc  # basic operation function
        self.SetNonLPara = self.setnonl_para      # parameter-setup function

    def nonlinear_proc(self, inputdata, NonLPara):
        """Apply the configured activation element-wise and return the result."""
        mode = NonLPara["nonlinearmode"]
        activations = {
            "Sigmoid": lambda x: 1 / (1 + np.exp(-x)),  # maps any real to (0, 1)
            "ReLU": lambda x: np.maximum(x, 0),         # clamps negatives to 0
            "Tanh": np.tanh,                            # maps any real to (-1, 1)
        }
        if mode not in activations:
            raise ValueError("Invalid nonlinear mode")
        return activations[mode](inputdata)

    def setnonl_para(self):
        """Return the activation parameters; choices: "Sigmoid", "ReLU", "Tanh"."""
        return {"nonlinearmode": "ReLU"}

    def output(self):
        """Return this element's fields and bound operations as a list."""
        result = [self.ObjID, self.ObjType, self.ObjLable, self.NonlinearProc,
                  self.SetNonLPara, self.ParaString, self.ObjX, self.ObjY]
        return result
class Label:  # Label helper (plain class, not a ModelObj element)
    """One-hot label encoding and batch label loading."""

    def setlabel_para(self, label_list):
        """Map each label in label_list to its one-hot row vector."""
        eye = np.eye(len(label_list))
        return {label: eye[i] for i, label in enumerate(label_list)}

    def label_proc(self, samples, labels, label_dict):
        """Pair every sample with the one-hot vector of its label."""
        return [(s, label_dict[lab]) for s, lab in zip(samples, labels)]

    def label_array(self, i):
        """Read batch i's 32 ground-truth labels from train.csv (first column)."""
        path_csv = 'train.csv'
        # NOTE(review): pd is not imported in this file's header — presumably
        # re-exported by read_data's wildcard import; verify.
        df = pd.read_csv(path_csv, header=None, skiprows=range(0, i * 32), nrows=(i + 1) * 32 - i * 32)
        labels = df.iloc[:, 0].tolist()
        return [int(x) for x in labels]
class Classifier_Class(ModelObj):  # Classification object
    """Softmax-based classifier with a probability threshold."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.ClassifierProc = self.classifier_proc  # basic operation function
        self.SetClassifyPara = self.setclassify_para  # parameter-setup function

    def classifier_proc(self, inputdata, ClassifyPara):
        """Softmax the (1, n) input and return the 1-based index of the best
        class whose probability clears the threshold.

        Fix: the softmax previously used ``x -= np.max(x)``, mutating the
        caller's array in place; it now subtracts into a fresh array (the
        returned label is unchanged — softmax is shift-invariant).
        """
        def softmax(x):
            x = x - np.max(x)  # shift by the max for numerical stability, no mutation
            e = np.exp(x)
            return e / np.sum(e)
        threshold = ClassifyPara["threshold"]
        prob = softmax(inputdata)        # probability distribution over classes
        prob1 = prob[prob >= threshold]  # candidates above the threshold
        index = np.where(prob == max(prob1))  # location of the best candidate
        # index[1] because prob is (1, n); the label is reported 1-based.
        # NOTE(review): labels elsewhere in this file are 0-based (0..6) —
        # confirm whether the +1 offset is intended.
        return index[1].item(0) + 1

    def setclassify_para(self):
        """Return the classifier parameters; the threshold is tunable per task."""
        return {"threshold": 0.1}

    def output(self):
        """Return this element's fields and bound operations as a list."""
        result = [self.ObjID, self.ObjType, self.ObjLable, self.ClassifierProc,
                  self.SetClassifyPara, self.ParaString, self.ObjX, self.ObjY]
        return result
class Error_Class(ModelObj):  # Loss/error computation object
    """Batch loss between predicted probabilities and integer labels."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.ErrorProc = self.error_proc      # basic operation function
        self.SetErrorPara = self.seterror_para  # parameter-setup function

    def error_proc(self, input, label, ErrorPara):
        """Return the scalar loss for a batch.

        input: (n, num_classes) prediction matrix; label: iterable of integer
        class indices; ErrorPara: (label_list, loss_type) tuple.
        Raises ValueError for an unknown loss_type.
        """
        label_list, loss_type = ErrorPara
        # One-hot encode the integer labels by row-indexing an identity matrix.
        one_hot_matrix = np.eye(len(label_list))
        label_one_hot = np.take(one_hot_matrix, list(label), axis=0)
        n = len(input)
        if loss_type == "CEE":  # cross-entropy: -sum(y * log(p)) / n
            return -np.sum(label_one_hot * np.log(input)) / n
        if loss_type == "MSE":  # mean squared error: sum((p - y)^2) / n
            return np.sum((input - label_one_hot) ** 2) / n
        if loss_type == "MAE":  # mean absolute error: sum(|p - y|) / n
            return np.sum(np.abs(input - label_one_hot)) / n
        raise ValueError("Invalid loss type")

    def seterror_para(self):
        """Return (label_list, loss_type); CEE = cross-entropy error."""
        label_list = [0, 1, 2, 3, 4, 5, 6]  # label categories
        loss_type = "CEE"
        return (label_list, loss_type)

    def output(self):
        """Return this element's fields and bound operations as a list."""
        result = [self.ObjID, self.ObjType, self.ObjLable, self.ErrorProc,
                  self.SetErrorPara, self.ParaString, self.ObjX, self.ObjY]
        return result
class AjConv_Class(ModelObj):  # Convolution-adjustment (backward) object
    """Averages the kernel gradient over a batch and takes one SGD step."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.AjConvProc = self.ajconv_proc        # basic operation function
        self.SetAjConvPara = self.setajconv_para  # parameter-setup function

    def ajconv_proc(self, images, AjConvPara):
        """Return the updated kernel after one gradient-descent step.

        Fix: the input-window slice previously used ``kernel_grad.shape[0]``
        for BOTH axes, which breaks for non-square kernels; the second axis
        now uses ``shape[1]`` (identical for the square kernels used here).

        The accumulated bias gradient is computed but (as before) not
        returned — NOTE(review): confirm whether callers need it.
        """
        kernel_grad_list = []
        bias_grad = 0  # accumulated bias gradient (currently unused by callers)
        for sample in images:
            grad = np.zeros_like(AjConvPara['kernel_info'])  # per-sample kernel gradient
            kh, kw = grad.shape[0], grad.shape[1]
            for i in range(AjConvPara['loss'].shape[0]):      # rows of the error matrix
                for j in range(AjConvPara['loss'].shape[1]):  # columns of the error matrix
                    # Rotate the matching input window 180 degrees and scale by the error.
                    grad += np.rot90(sample[i:i + kh, j:j + kw], 2) * AjConvPara['loss'][i, j]
                    bias_grad += AjConvPara['loss'][i, j]
            kernel_grad_list.append(grad)
        # Mean gradient over the batch.
        kernel_grad = np.sum(np.stack(kernel_grad_list, axis=0), axis=0) / len(images)
        # Gradient-descent update of the kernel.
        kernel = AjConvPara['kernel_info'] - AjConvPara['learning_rate'] * kernel_grad
        return kernel

    def setajconv_para(self, loss, ConvPara):
        """Bundle the current kernel, learning rate and scalar loss into a dict."""
        AjConvPara = {
            'kernel_info': ConvPara['kernel'],  # current kernel values
            'learning_rate': 0.01,              # SGD step size
            'loss': np.array([[loss]]),         # scalar loss as a 1x1 matrix
        }
        return AjConvPara
class AjFullconn_Class(ModelObj):  # Fully-connected-adjustment object
    """Produces updated FC weights/bias from the loss and learning rate."""

    def __init__(self, ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY):
        super().__init__(ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY)
        self.AjFullconnProc = self.ajfullconn_proc  # basic operation function
        self.SetAjFCPara = self.setajfc_para        # parameter-setup function

    def ajfullconn_proc(self, AjFCPara):
        """Return (new_weights, new_bias) after one update step.

        NOTE(review): the weight 'gradient' is outer(loss, lr) broadcast over
        the weight matrix, and the bias update decays the bias itself; neither
        uses layer activations — preserved as-is, confirm the derivation.
        """
        lr = AjFCPara['learning_rate']
        delta_w = np.outer(AjFCPara['loss'], lr)  # (k, 1) column, broadcast over columns
        new_weights = AjFCPara['weights'] - delta_w
        new_bias = AjFCPara['bias'] - lr * AjFCPara['bias']
        return new_weights, new_bias

    def setajfc_para(self, loss, FullConnPara):
        """Bundle FC weights/bias, learning rate and loss into one dict."""
        return {
            'weights': FullConnPara["weights"],  # FC weights
            'bias': FullConnPara["bias"],        # FC bias
            'learning_rate': 0.01,               # step size
            'loss': np.array([loss]),            # loss as a length-1 vector
        }
def main():
    """Batch training driver: forward pass, loss, then kernel/FC adjustment."""
    DataPara = DataSet.SetDataPara()  # dataset-loading parameters
    train_images, test_images = DataSet.LoadData(DataPara)
    ConvPara = Conv.SetConvPara()  # convolution-layer parameters
    PoolPara = Pool.SetPollPara()
    FullConnPara = FullConn.SetFullConnPara((1, 23, 23), 7)
    NonLPara = Nonline.SetNonLPara()
    ClassifyPara = Classifier.SetClassifyPara()
    ErrorPara = Error.SetErrorPara()
    LabelPara = Label()
    # AjFCPara = AjFullconn.SetAjFCPara()
    for i in range(len(train_images) // 32):  # iterate over 32-image batches
        images = train_images[i * 32:(i + 1) * 32]
        # Convolution stage
        conv_images = []
        for image in images:
            # number of dimensions of this sample
            dim = len(image.shape)
            # 2-D samples are promoted to single-channel 3-D arrays first
            if dim == 2:
                image_h, image_w = image.shape
                image = np.reshape(image, (1, image_h, image_w))
                # convolve according to ConvPara
                output = Conv.ConvProc(image, ConvPara)
                # collect the convolution result
                conv_images.append(output)
            # 3-D samples are convolved as-is
            elif dim == 3:
                output = Conv.ConvProc(image, ConvPara)
                conv_images.append(output)
        # Convert to an array for the following stages.
        conv_images = np.array(conv_images)
        pool_images = []  # pooling stage
        for image in conv_images:
            output = Pool.MaxPoolProc(image, PoolPara)
            pool_images.append(output)  # collect the pooling result
        pool_images = np.array(pool_images)
        # print(conv_images)
        # print(pool_images[0].shape)
        # Fully-connected stage
        fullconn_images = []
        for image in pool_images:
            output = FullConn.FullConnProc(image, FullConnPara)
            # collect the fully-connected result
            fullconn_images.append(output)
        # Convert to an array for the following stages.
        fullconn_images = np.array(fullconn_images)
        # print(fullconn_images)
        # Activation stage
        nonlinear_images = []
        for image in fullconn_images:
            output = Nonline.NonlinearProc(image, NonLPara)
            # collect the activation result
            nonlinear_images.append(output)
        # Convert to an array for the following stages.
        nonlinear_images = np.array(nonlinear_images)
        # print(nonlinear_images)
        classifier_images = []  # predicted class labels
        prob_images = []  # softmax probability vectors
        def softmax(x):  # NOTE(review): `x -= max` mutates the input array in place
            x -= np.max(x)  # shift by the max to avoid numeric overflow
            return np.exp(x) / np.sum(np.exp(x))
        for image in nonlinear_images:  # classification stage
            prob = softmax(image)  # probability distribution for this sample
            prob_images.append(prob)
            output = Classifier.ClassifierProc(image, ClassifyPara)  # predicted label
            classifier_images.append(output)
        classifier_images = np.array(classifier_images)
        print(classifier_images)
        # print(setlabel_para())
        label_dict = LabelPara.setlabel_para([0, 1, 2, 3, 4, 5, 6])
        right_label = LabelPara.label_array(i)  # ground-truth labels for this batch
        labeled_samples = LabelPara.label_proc(images, right_label, label_dict)
        print(right_label)
        # Loss: prob_images is a probability matrix, right_label a class list.
        prob_images = np.squeeze(prob_images)
        # print(prob_images)
        loss = Error.ErrorProc(prob_images, right_label, ErrorPara)  # batch loss
        print(loss)
        # Backward stage: adjust the conv kernel, then the FC weights/bias.
        AjConvPara = AjConv.SetAjConvPara(loss, ConvPara)
        ConvPara['kernel'] = AjConv.AjConvProc(images, AjConvPara)
        print(ConvPara['kernel'])
        AjFCPara = AjFullconn.SetAjFCPara(loss, FullConnPara)
        weight, bias = AjFullconn.AjFullconnProc(AjFCPara)
        FullConnPara['weights'] = weight
        FullConnPara['bias'] = bias
        # print(weight, bias)
if __name__ == '__main__':
    # Build one element per pipeline stage; the positional arguments are
    # (ObjID, ObjType, ObjLable, ParaString, ObjX, ObjY).
    DataSet = Data_Class("DataSet1", 1, "数据集1", [], 120, 330)
    Conv = Conv_Class("Conv1", 2, "卷积1", [], 250, 330)
    Pool = Pool_Class("Pool1", 3, "最大池化1", [], 380, 330)
    FullConn = FullConn_Class("FullConn1", 4, "全连接1", [], 510, 330)
    Nonline = Nonline_Class("Nonline1", 5, "非线性函数1", [], 640, 330)
    Classifier = Classifier_Class("Classifier1", 6, "分类1", [], 780, 330)
    Error = Error_Class("Error1", 7, "误差计算1", [], 710, 124)
    AjConv = AjConv_Class("AjConv1", 8, "卷积调整1", [], 250, 70)
    AjFullconn = AjFullconn_Class("AjFullconn1", 9, "全连接调整1", [], 510, 120)
    # AllModelObj = [DataSet, Conv, Pool, FullConn, Nonline, Classifier, Error, AjConv, AjFullconn]
    main()

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.7 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.2 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.1 KiB

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save