master
qidangge 3 years ago
parent 18aecda10f
commit 32a834777d

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$/.." vcs="Git" />
</component>
</project>

Binary file not shown.

Before: 6.9 KiB  →  After: 6.2 KiB  (binary image; width/height not shown)

@@ -1,2 +1,2 @@
model_checkpoint_path: "mnist.ckpt"
all_model_checkpoint_paths: "mnist.ckpt"
model_checkpoint_path: "mnist_cnn3.ckpt"
all_model_checkpoint_paths: "mnist_cnn3.ckpt"

Binary file not shown.

@@ -51,7 +51,6 @@ def screenshot(*args):
im.save('1.jpg')
print("在使用%d模型"%model)
result = read_image.predeiction('1.jpg', model)
text.set(str(result))
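# text is presumably a Tkinter StringVar bound to the GUI's result field (assumption; its definition is outside this hunk)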
# This function clears the canvas

@@ -1,52 +1,82 @@
import tensorflow as tf
import tensorflow.keras as keras
import os
from matplotlib import pyplot as plt
from PIL import Image
import numpy as np
mnist = tf.keras.datasets.mnist
mnist = keras.datasets.mnist
# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
# Normalize the samples: map each pixel from [0, 255] to [0, 1]
x_train = x_train/255.0
x_test = x_test/255.0
# Add a channel dimension
x_train = tf.expand_dims(x_train, -1)
x_test = tf.expand_dims(x_test, -1)
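# After expand_dims: x_train has shape (60000, 28, 28, 1) and x_test (10000, 28, 28, 1),
# the (batch, height, width, channels) layout Conv2D expects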
def creat_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
model = keras.Sequential([
# Convolutional block
keras.layers.Conv2D(32, 3, padding="SAME"),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.MaxPool2D(pool_size=(2, 2), strides=2),
keras.layers.Dropout(0.2),
# Classifier
keras.layers.Flatten(),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
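# Note: the first Conv2D omits input_shape, so the network is built lazily
# the first time data flows through it (e.g. on the first fit() call)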
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
model.compile(optimizer='adam',  # use the Adam optimizer
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
# specifies how the training error of each batch is reduced
metrics=["sparse_categorical_accuracy"])
# evaluation metric
return model
def model_fit(model,check_save_path):
def eva_acc(str, model):
# Load saved weights and measure accuracy on the test set
model.load_weights(str)
final_loss, final_acc = model.evaluate(x_test, y_test, verbose=2)
print("Model accuracy: ", final_acc, ", model loss: ", final_loss)
def model_fit(model, check_save_path):
if os.path.exists(check_save_path+'.index'):
print("load modals...")
model.load_weights(check_save_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=check_save_path,
save_weights_only=True,
save_best_only=True)
ES_callback = keras.callbacks.EarlyStopping(
monitor="val_sparse_categorical_accuracy", # 数据的监视入口
min_delta=0.001, # 增大或减小的阈值只有大于这个部分才算作improvement。
patience=1, # 能够容忍多少个epo
# ch内都没有improvement。
mode='max' # auto, min, ,max三个可能。如果知道是要上升还是下降建议设置一下。
)
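# With patience=1 and min_delta=0.001, training halts after a single epoch
# whose validation accuracy fails to improve by at least 0.001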
history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_split=0.15, validation_freq=1, callbacks=[cp_callback])
model.summary()
final_loss, final_acc = model.evaluate(x_test, y_test, verbose=2)
print("Model accuracy: ", final_acc, ", model loss: ", final_loss)
history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_split=0.15, validation_freq=1, callbacks=[cp_callback, ES_callback])
# print the layer/parameter summary
model.summary()
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
# Plot the training curves
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
@@ -54,13 +84,9 @@ def model_fit(model,check_save_path):
plt.legend()
plt.show()
def eva_acc(str, model):
model.load_weights(str)
final_loss, final_acc = model.evaluate(x_test, y_test, verbose=2)
print("Model accuracy: ", final_acc, ", model loss: ", final_loss)
if __name__ == "__main__":
check_save_path = "./checkpoint/mnist.ckpt"
check_save_path = "./checkpoint/mnist_cnn1.ckpt"
model = creat_model()
model_fit(model, check_save_path)
eva_acc(check_save_path, model)
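A quick inference sketch against the checkpoint this script writes (the digit.png path and the reuse of creat_model() are illustrative assumptions, not part of the commit):
import numpy as np
from PIL import Image

model = creat_model()
model.load_weights("./checkpoint/mnist_cnn1.ckpt")  # weights saved by model_fit() above
img = Image.open("digit.png").convert("L").resize((28, 28))  # hypothetical 28x28 grayscale digit
x = np.asarray(img, dtype="float32") / 255.0  # same [0, 1] scaling as training
x = x.reshape(1, 28, 28, 1)  # batch of one, single channel
probs = model.predict(x)
print("predicted digit:", int(np.argmax(probs)))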

@@ -5,14 +5,16 @@ import tensorflow as tf
import matplotlib.pyplot as plt
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize the data
x_train = x_train/255.0
x_test = x_test/255.0
# Add a channel dimension to match the Conv2D input format
x_train = tf.expand_dims(x_train, -1)
x_test = tf.expand_dims(x_test, -1)
print("train shape:", x_train.shape)
print("test shape:", x_test.shape)
# print("train shape:", x_train.shape)
# print("test shape:", x_test.shape)
# Use this class for image augmentation
datagen = keras.preprocessing.image.ImageDataGenerator(
@@ -24,6 +26,7 @@ datagen = keras.preprocessing.image.ImageDataGenerator(
horizontal_flip=False  # boolean: randomly flip inputs horizontally
)
train_datagen = datagen.flow(
x_train,
y_train,
@@ -38,20 +41,31 @@ validation_genetor = datagen.flow(
subset="validation"
)
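# flow(..., subset=...) only works because validation_split is evidently set on the
# ImageDataGenerator above (its arguments are elided in this hunk); "training" and
# "validation" then draw disjoint slices of the same arrays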
def creat_model():
"""
此函数用于构建模型
调用keras.Sequential()建立模型的框架
使用compile()配置神经网络训练方法
:return:
"""
model = keras.Sequential([
keras.layers.Reshape((28, 28, 1)),
# Convolutional layer
keras.layers.Conv2D(filters=32, kernel_size=(5, 5), activation="relu", padding="same",
input_shape=(28, 28, 1)),
# Pooling layer
keras.layers.MaxPool2D((2, 2)),
# Convolution and pooling block
keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"),
keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"),
keras.layers.MaxPool2D((2, 2)),
# Convolution and pooling block
keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation="relu", padding="same"),
keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation="relu", padding="same"),
keras.layers.MaxPool2D((2, 2)),
# Classifier: several fully connected layers
keras.layers.Flatten(),
keras.layers.Dense(512, activation="sigmoid"),
keras.layers.Dropout(0.25),
@@ -65,29 +79,53 @@ def creat_model():
keras.layers.Dense(10, activation="sigmoid")
])
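# Note: a sigmoid output layer still trains with this loss, but the ten outputs
# are not a normalized distribution; softmax is the conventional choice here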
# Configure the training method with compile():
# model.compile(optimizer=<optimizer>,
#               loss=<loss function>,
#               metrics=["<metric>"])
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=["sparse_categorical_accuracy"])
return model
def model_fit(model, check_save_path):
# If a saved checkpoint exists, load it
if os.path.exists(check_save_path+'.index'):
print("load modals...")
model.load_weights(check_save_path)
# Save the model with callbacks.ModelCheckpoint():
# tf.keras.callbacks.ModelCheckpoint(filepath=<path to save the model>,
#     save_weights_only=True saves only the weights; False saves the whole model
#     save_best_only=True keeps only the model that performs best on the validation set
# )
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=check_save_path,
save_weights_only=True,
save_best_only=True)
# Adjusts the learning rate dynamically; parameters:
# monitor: the value to watch, e.g. accuracy, val_loss, or val_accuracy
# factor: scale applied to the learning rate, i.e. lr = lr * factor
# patience: epochs without improvement before the reduction is triggered
# min_lr: lower bound the learning rate can shrink to
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
factor=0.1,
patience=5,
patience=2,
min_lr=0.000001,
verbose=1)
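# e.g. with factor=0.1 and patience=2, a learning rate of 1e-3 drops to 1e-4
# after two stagnant epochs, and can fall as far as the min_lr floor of 1e-6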
history = model.fit(train_datagen, epochs=1, validation_data=validation_genetor, callbacks=[reduce_lr,cp_callback],verbose=1)
# Early stopping:
# monitor: the quantity to watch
# min_delta: minimum change that counts as an improvement; an absolute change below min_delta is treated as no improvement
# patience: number of epochs without improvement after which training stops
earlystop_callback = keras.callbacks.EarlyStopping(
monitor='val_sparse_categorical_accuracy',
min_delta=0.0001,
patience=2)
history = model.fit(train_datagen, epochs=10, validation_data=validation_genetor, callbacks=[reduce_lr, cp_callback, earlystop_callback], verbose=1)
model.summary()
# For visualization
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
@@ -106,7 +144,7 @@ def model_fit(model, check_save_path):
plt.legend()
plt.show()
# This function measures accuracy on the test set
def model_valtest(model, check_save_path):
model.load_weights(check_save_path)
final_loss, final_acc = model.evaluate(x_test, y_test, verbose=2)
@@ -116,5 +154,5 @@ if __name__ == "__main__":
check_save_path = "./checkpoint/mnist_cnn3.ckpt"
model = creat_model()
# model_fit(model, check_save_path)
#model_fit(model, check_save_path)
model_valtest(model, check_save_path)
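As a sanity check on the augmentation settings, one can preview a batch from datagen (a sketch, assuming the datagen/x_train/y_train definitions above; not part of the commit):
import numpy as np
batch_x, batch_y = next(datagen.flow(np.asarray(x_train), np.asarray(y_train), batch_size=9, subset="training"))
plt.figure(figsize=(4, 4))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(batch_x[i].squeeze(), cmap="gray")  # drop the channel axis for display
    plt.title(int(batch_y[i]))
    plt.axis("off")
plt.show()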