add: test and train data, main.c

main
高世波 1 year ago
parent f9c938b313
commit 7c6cd7207f

3 .gitignore vendored

@@ -0,0 +1,3 @@
*.*~
*.out
*.exe

10 TestData.txt

@@ -0,0 +1,10 @@
0.111 0.112
0.001 0.999
0.123 0.345
0.123 0.456
0.123 0.789
0.234 0.567
0.234 0.678
0.387 0.401
0.616 0.717
0.701 0.919

30 TrainData.txt

@@ -0,0 +1,30 @@
0 0 0
0 1 1
1 0 1
1 1 0
0.8 0.8 0
0.6 0.6 0
0.4 0.4 0
0.2 0.2 0
1.0 0.8 1
1.0 0.6 1
1.0 0.4 1
1.0 0.2 1
0.8 0.6 1
0.6 0.4 1
0.4 0.2 1
0.2 0 1
0.999 0.666 1
0.666 0.333 1
0.333 0 1
0.8 0.4 1
0.4 0 1
0 0.123 1
0.12 0.23 1
0.23 0.34 1
0.34 0.45 1
0.45 0.56 1
0.56 0.67 1
0.67 0.78 1
0.78 0.89 1
0.89 0.99 1

398 main.c

@@ -0,0 +1,398 @@
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#define INNODE 2 // number of input-layer neurons
#define HIDENODE 10 // number of hidden-layer neurons
#define OUTNODE 1 // number of output-layer neurons
/**
 * Learning rate.
 */
double StudyRate = 1.6;
/**
 * Training stops once the maximum per-sample error falls below this threshold.
 */
double threshold = 1e-4;
/**
 * Maximum number of training iterations.
 */
int mostTimes = 1000000;
/**
 * Number of training samples actually read.
 */
int trainSize = 0;
/**
 * Number of test samples actually read.
 */
int testSize = 0;
/**
 * A data set: sample inputs and their expected (or predicted) outputs.
 */
typedef struct Sample{
    double out[30][OUTNODE]; // outputs
    double in[30][INNODE]; // inputs
}Sample;
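/* Note: capacity is fixed at 30 samples; a data file with more rows would overflow these arrays. */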
/**
 * One neuron node.
 */
typedef struct Node{
    double value; // current output value of this node
    double bias; // bias of this node
    double bias_delta; // accumulated correction to the bias
    double *weight; // weights from this node to each node in the next layer
    double *weight_delta; // accumulated corrections to those weights
}Node;
/**
 * Input layer.
 */
Node inputLayer[INNODE];
/**
 * Hidden layer.
 */
Node hideLayer[HIDENODE];
/**
 * Output layer.
 */
Node outLayer[OUTNODE];
double Max(double a, double b){
return a > b ? a : b;
}
/**
 * The sigmoid activation function.
 * @param x the weighted input
 * @return 1 / (1 + e^(-x))
 */
double sigmoid(double x){
    return 1.0 / (1.0 + exp(-x));
}
/**
 * Read the training data: two inputs and one expected output per line.
 * @param filename path of the training data file
 * @return the samples read, or NULL if the file could not be opened
 */
Sample * getTrainData(const char * filename){
Sample * result = (Sample*)malloc(sizeof (Sample));
FILE * file = fopen(filename, "r");
if(file != NULL){
int count = 0;
        // stop on EOF or a malformed line (fscanf returns the number of items matched)
        while (fscanf(file, "%lf %lf %lf", &result->in[count][0], &result->in[count][1], &result->out[count][0]) == 3){
++count;
}
trainSize = count;
printf("%s The file has been successfully read!\n", filename);
fclose(file);
return result;
    } else{
        // fopen failed, so there is no stream to close here
        printf("%s Encountered an error while opening the file!\n\a", filename);
        free(result);
        return NULL;
    }
}
/**
 * Read the test data: two inputs per line.
 * @param filename path of the test data file
 * @return the samples read, or NULL if the file could not be opened
 */
Sample * getTestData(const char * filename){
    /* Allocate enough memory for one Sample structure and keep the pointer in result. */
    Sample * result = (Sample*)malloc(sizeof (Sample));
    FILE * file = fopen(filename, "r"); // open the file
    if(file != NULL){
        int count = 0; // tracks how many data lines have been read
        /* Read two doubles per line from the test file until the end of the file,
           counting the lines in count. */
        while(fscanf(file, "%lf %lf", &result->in[count][0], &result->in[count][1]) == 2){
++count;
}
        testSize = count; // store the final count in the global testSize for later use
printf("%s The file has been successfully read!\n", filename);
fclose(file);
        return result; // return the samples
    }else{
        // fopen failed, so there is no stream to close here
        printf("%s Encountered an error while opening the file!\n\a", filename);
        free(result);
        return NULL;
    }
}
/**
 * Print the samples, one per line: inputs followed by outputs.
 * @param data the samples to print
 * @param size how many samples to print
 */
void printData(Sample * data, int size){
    for (int count = 0; count < size; ++count) {
        for (int i = 0; i < INNODE; ++i) printf("%lf ", data->in[count][i]);
        for (int i = 0; i < OUTNODE; ++i) printf("%lf ", data->out[count][i]);
        printf("\n");
    }
}
/**
 * Initialise the network: allocate the weight arrays and randomise the weights and biases.
 */
void init(){
    // seed the random number generator with the current time
    srand(time(NULL));
    // initialise the input layer
for (int i = 0; i < INNODE; ++i) {
inputLayer[i].weight = (double *)malloc(sizeof (double ) * HIDENODE);
inputLayer[i].weight_delta = (double *) malloc(sizeof (double ) * HIDENODE);
inputLayer[i].bias = 0.0;
inputLayer[i].bias_delta = 0.0;
}
    // initialise the input-to-hidden weights
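    // rand() % 10000 / 10000.0 is uniform over [0, 1); scaling by 2 and shifting gives [-1, 1)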
for (int i = 0; i < INNODE; ++i) {
for (int j = 0; j < HIDENODE; ++j) {
inputLayer[i].weight[j] = rand() % 10000 / (double )10000 * 2 - 1.0;
inputLayer[i].weight_delta[j] = 0.0;
}
}
    // initialise the hidden-layer nodes
for (int i = 0; i < HIDENODE; ++i) {
        /* Allocate the weights from hidden node i to the next (output) layer,
           using malloc for OUTNODE doubles. */
        hideLayer[i].weight = (double *)malloc(sizeof (double ) * OUTNODE);
        /* Allocate the array that accumulates this node's weight corrections
           during training, again OUTNODE doubles. */
        hideLayer[i].weight_delta = (double *)malloc(sizeof (double ) * OUTNODE);
        /* Give hidden node i a random bias in [-1.0, 1.0). */
        hideLayer[i].bias = rand() % 10000 / (double )10000 * 2 - 1.0;
        /* The bias correction starts at 0.0. */
        hideLayer[i].bias_delta = 0.0;
}
    // initialise the hidden-to-output weights
for (int i = 0; i < HIDENODE; ++i) {
for (int j = 0; j < OUTNODE; ++j) {
hideLayer[i].weight[j] = rand() % 10000 / (double )10000 * 2 - 1.0;
hideLayer[i].weight_delta[j] = 0.0;
}
}
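    // initialise the output-layer biases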
for (int i = 0; i < OUTNODE; ++i) {
outLayer[i].bias = rand() % 10000 / (double )10000 * 2 - 1.0;
outLayer[i].bias_delta = 0.0;
}
}
/**
 * Reset all accumulated weight and bias corrections to zero.
 */
void resetDelta(){
for (int i = 0; i < INNODE; ++i) {
for (int j = 0; j < HIDENODE; ++j) {
inputLayer[i].weight_delta[j] = 0.0;
}
}
for (int i = 0; i < HIDENODE; ++i) {
hideLayer[i].bias_delta = 0.0;
for (int j = 0; j < OUTNODE; ++j) {
hideLayer[i].weight_delta[j] = 0.0;
}
}
for (int i = 0; i < OUTNODE; ++i) {
outLayer[i].bias_delta = 0.0;
}
}
int main() {
    // initialise the network
    init();
    // load the training set
Sample * trainSample = getTrainData("TrainData.txt");
// printData(trainSample, trainSize);
for (int trainTime = 0; trainTime < mostTimes; ++trainTime) {
        // reset the accumulated gradients
resetDelta();
        // maximum per-sample error seen in this pass
double error_max = 0.0;
        // accumulate backpropagation gradients over the whole training set
for (int currentTrainSample_Pos = 0; currentTrainSample_Pos < trainSize; ++currentTrainSample_Pos) {
            // feed in the inputs
for (int inputLayer_Pos = 0; inputLayer_Pos < INNODE; ++inputLayer_Pos) {
inputLayer[inputLayer_Pos].value = trainSample->in[currentTrainSample_Pos][inputLayer_Pos];
}
            /** ----- Forward propagation ----- */
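            // each node outputs sigmoid(weighted sum of its inputs minus its own bias)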
for (int hideLayer_Pos = 0; hideLayer_Pos < HIDENODE; ++hideLayer_Pos) {
double sum = 0.0;
for (int inputLayer_Pos = 0; inputLayer_Pos < INNODE; ++inputLayer_Pos) {
sum += inputLayer[inputLayer_Pos].value * inputLayer[inputLayer_Pos].weight[hideLayer_Pos];
}
sum -= hideLayer[hideLayer_Pos].bias;
hideLayer[hideLayer_Pos].value = sigmoid(sum);
}
            for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE ; ++outLayer_Pos) {
                double sum = 0.0;
                for (int hideLayer_Pos = 0; hideLayer_Pos < HIDENODE; ++hideLayer_Pos) {
                    /* accumulate the product of each hidden node's value and its weight */
                    sum += hideLayer[hideLayer_Pos].value * hideLayer[hideLayer_Pos].weight[outLayer_Pos];
                }
                /* subtract the output node's bias from sum */
                sum -= outLayer[outLayer_Pos].bias;
                /* activate sum with the sigmoid function and store the result
                   in the corresponding output node's value */
                outLayer[outLayer_Pos].value = sigmoid(sum);
            }
            /** ----- Compute the error ----- */
double error = 0.0;
for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE; ++outLayer_Pos) {
double temp = fabs(outLayer[outLayer_Pos].value - trainSample->out[currentTrainSample_Pos][outLayer_Pos]);
                // squared-error loss
error += temp * temp / 2.0;
}
error_max = Max(error_max, error);
            /** ----- Backpropagation ----- */
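            /* With loss E = (t - o)^2 / 2, output o = sigmoid(s) and s = sum(w * h) - bias,
               the chain rule gives dE/dw = (o - t) * o * (1 - o) * h and, because the bias
               is subtracted, dE/dbias = (t - o) * o * (1 - o). Each delta below accumulates
               the negative gradient, so the later "+= StudyRate * delta / trainSize" steps
               perform batch gradient descent. */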
for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE; ++outLayer_Pos) {
double bias_delta = -(trainSample->out[currentTrainSample_Pos][outLayer_Pos] - outLayer[outLayer_Pos].value)
* outLayer[outLayer_Pos].value * (1.0 - outLayer[outLayer_Pos].value);
outLayer[outLayer_Pos].bias_delta += bias_delta;
}
for (int hideLayer_Pos = 0; hideLayer_Pos < HIDENODE; ++hideLayer_Pos) {
for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE; ++outLayer_Pos) {
double weight_delta = (trainSample->out[currentTrainSample_Pos][outLayer_Pos] - outLayer[outLayer_Pos].value)
* outLayer[outLayer_Pos].value * (1.0 - outLayer[outLayer_Pos].value)
* hideLayer[hideLayer_Pos].value;
hideLayer[hideLayer_Pos].weight_delta[outLayer_Pos] += weight_delta;
}
}
            // propagate the error back to get the hidden-layer bias gradients
for (int hideLayer_Pos = 0; hideLayer_Pos < HIDENODE; ++hideLayer_Pos) {
double sum = 0.0;
for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE; ++outLayer_Pos) {
sum += -(trainSample->out[currentTrainSample_Pos][outLayer_Pos] - outLayer[outLayer_Pos].value)
* outLayer[outLayer_Pos].value * (1.0 - outLayer[outLayer_Pos].value)
* hideLayer[hideLayer_Pos].weight[outLayer_Pos];
}
hideLayer[hideLayer_Pos].bias_delta += sum * hideLayer[hideLayer_Pos].value * (1.0 - hideLayer[hideLayer_Pos].value);
}
for (int inputLayer_Pos = 0; inputLayer_Pos < INNODE; ++inputLayer_Pos) {
for (int hideLayer_Pos = 0; hideLayer_Pos < HIDENODE; ++hideLayer_Pos) {
double sum = 0.0;
for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE; ++outLayer_Pos) {
sum += (trainSample->out[currentTrainSample_Pos][outLayer_Pos] - outLayer[outLayer_Pos].value)
* outLayer[outLayer_Pos].value * (1.0 - outLayer[outLayer_Pos].value)
* hideLayer[hideLayer_Pos].weight[outLayer_Pos];
}
inputLayer[inputLayer_Pos].weight_delta[hideLayer_Pos] += sum * hideLayer[hideLayer_Pos].value * (1.0 - hideLayer[hideLayer_Pos].value)
* inputLayer[inputLayer_Pos].value;
}
}
}
        // stop once the maximum error is within the allowed tolerance
if(error_max < threshold){
printf("\a Training completed!Total training count:%d, maximum error is:%f\n", trainTime + 1, error_max);
break;
}
        // error still too large: apply the accumulated corrections
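        // batch update: each delta is averaged over trainSize before being scaled by the learning rate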
for (int inputLayer_Pos = 0; inputLayer_Pos < INNODE; ++inputLayer_Pos) {
for (int hideLayer_Pos = 0; hideLayer_Pos < HIDENODE; ++hideLayer_Pos) {
inputLayer[inputLayer_Pos].weight[hideLayer_Pos] += StudyRate
* inputLayer[inputLayer_Pos].weight_delta[hideLayer_Pos] /
(double) trainSize;
}
}
for (int hideLayer_Pos = 0; hideLayer_Pos < HIDENODE; ++hideLayer_Pos) {
hideLayer[hideLayer_Pos].bias += StudyRate
* hideLayer[hideLayer_Pos].bias_delta / (double )trainSize;
for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE; ++outLayer_Pos) {
hideLayer[hideLayer_Pos].weight[outLayer_Pos] += StudyRate
* hideLayer[hideLayer_Pos].weight_delta[outLayer_Pos] / (double )trainSize;
}
}
for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE; ++outLayer_Pos) {
outLayer[outLayer_Pos].bias += StudyRate
* outLayer[outLayer_Pos].bias_delta / (double )trainSize;
}
}
    // training done: load the test set
Sample * testSample = getTestData("TestData.txt");
printf("The predicted results are as follows:\n");
for (int currentTestSample_Pos = 0; currentTestSample_Pos < testSize; ++currentTestSample_Pos) {
for (int inputLayer_Pos = 0; inputLayer_Pos < INNODE; ++inputLayer_Pos) {
inputLayer[inputLayer_Pos].value = testSample->in[currentTestSample_Pos][inputLayer_Pos];
}
for (int hideLayer_Pos = 0; hideLayer_Pos < HIDENODE; ++hideLayer_Pos) {
double sum = 0.0;
for (int inputLayer_Pos = 0; inputLayer_Pos < INNODE; ++inputLayer_Pos) {
sum += inputLayer[inputLayer_Pos].value * inputLayer[inputLayer_Pos].weight[hideLayer_Pos];
}
sum -= hideLayer[hideLayer_Pos].bias;
hideLayer[hideLayer_Pos].value = sigmoid(sum);
}
for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE; ++outLayer_Pos) {
double sum = 0.0;
for (int hideLayer_Pos = 0; hideLayer_Pos < HIDENODE; ++hideLayer_Pos) {
sum += hideLayer[hideLayer_Pos].value * hideLayer[hideLayer_Pos].weight[outLayer_Pos];
}
sum -= outLayer[outLayer_Pos].bias;
outLayer[outLayer_Pos].value = sigmoid(sum);
}
for (int outLayer_Pos = 0; outLayer_Pos < OUTNODE; ++outLayer_Pos) {
testSample->out[currentTestSample_Pos][outLayer_Pos] = outLayer[outLayer_Pos].value;
}
}
printData(testSample, testSize);
return 0;
}
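Note: a minimal way to compile and run this (assuming gcc, with TrainData.txt and TestData.txt in the working directory) is: gcc main.c -o bp -lm && ./bp. The -lm flag links the math library needed by exp() in sigmoid.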