Compare commits

..

2 Commits

Author        SHA1         Message         Date
Lashay Yang   57bb85b76a   delete commit   6 years ago
Lashay Yang   09015874a0   second commit   6 years ago

File diff suppressed because one or more lines are too long

@@ -1,318 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"D:\\Program Files\\ANACOND3-5.2\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
" from ._conv import register_converters as _register_converters\n",
"Using TensorFlow backend.\n",
"D:\\Program Files\\ANACOND3-5.2\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:469: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
"D:\\Program Files\\ANACOND3-5.2\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:470: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
"D:\\Program Files\\ANACOND3-5.2\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:471: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
"D:\\Program Files\\ANACOND3-5.2\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:472: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
"D:\\Program Files\\ANACOND3-5.2\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:473: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
"D:\\Program Files\\ANACOND3-5.2\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:476: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
" np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"60000 train samples\n",
"10000 test samples\n"
]
}
],
"source": [
"'''Trains a simple deep NN on the MNIST dataset.\n",
"Gets to 98.40% test accuracy after 20 epochs\n",
"(there is *a lot* of margin for parameter tuning).\n",
"2 seconds per epoch on a K520 GPU.\n",
"'''\n",
"#import\n",
"from __future__ import print_function\n",
"\n",
"import keras\n",
"from keras.datasets import mnist\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Dropout\n",
"from keras.optimizers import RMSprop\n",
"from keras.utils import np_utils\n",
"\n",
"batch_size = 128\n",
"num_classes = 10\n",
"epochs = 20\n",
"\n",
"# the data, split between train and test sets\n",
"(x_train, y_train), (x_test, y_test) = mnist.load_data()\n",
"\n",
"x_train = x_train.reshape(60000, 784)\n",
"x_test = x_test.reshape(10000, 784)\n",
"x_train = x_train.astype('float32') #数据类型转换\n",
"x_test = x_test.astype('float32')\n",
"x_train /= 255 #数据归一化0,1\n",
"x_test /= 255\n",
"print(x_train.shape[0], 'train samples')\n",
"print(x_test.shape[0], 'test samples')\n",
"\n",
"#Using TensorFlow backend."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# convert class vectors to binary class matrices\n",
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
"y_test = keras.utils.to_categorical(y_test, num_classes)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"dense_1 (Dense) (None, 512) 401920 \n",
"_________________________________________________________________\n",
"dropout_1 (Dropout) (None, 512) 0 \n",
"_________________________________________________________________\n",
"dense_2 (Dense) (None, 512) 262656 \n",
"_________________________________________________________________\n",
"dropout_2 (Dropout) (None, 512) 0 \n",
"_________________________________________________________________\n",
"dense_3 (Dense) (None, 10) 5130 \n",
"=================================================================\n",
"Total params: 669,706\n",
"Trainable params: 669,706\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"model = Sequential()\n",
"model.add(Dense(512, activation='relu', input_shape=(784,)))\n",
"model.add(Dropout(0.2))\n",
"model.add(Dense(512, activation='relu'))\n",
"model.add(Dropout(0.2))\n",
"model.add(Dense(num_classes, activation='softmax'))\n",
"\n",
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Train on 60000 samples, validate on 10000 samples\n",
"Epoch 1/20\n",
"60000/60000 [==============================] - 9s - loss: 0.2453 - acc: 0.9235 - val_loss: 0.1407 - val_acc: 0.9563\n",
"Epoch 2/20\n",
"60000/60000 [==============================] - 9s - loss: 0.1014 - acc: 0.9694 - val_loss: 0.0901 - val_acc: 0.9737\n",
"Epoch 3/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0750 - acc: 0.9771 - val_loss: 0.0843 - val_acc: 0.9739\n",
"Epoch 4/20\n",
"60000/60000 [==============================] - 10s - loss: 0.0609 - acc: 0.9820 - val_loss: 0.0722 - val_acc: 0.9802\n",
"Epoch 5/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0497 - acc: 0.9853 - val_loss: 0.0842 - val_acc: 0.9781\n",
"Epoch 6/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0439 - acc: 0.9870 - val_loss: 0.0790 - val_acc: 0.9810\n",
"Epoch 7/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0390 - acc: 0.9884 - val_loss: 0.0759 - val_acc: 0.9827\n",
"Epoch 8/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0335 - acc: 0.9899 - val_loss: 0.0874 - val_acc: 0.9814\n",
"Epoch 9/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0331 - acc: 0.9905 - val_loss: 0.0794 - val_acc: 0.9823\n",
"Epoch 10/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0304 - acc: 0.9917 - val_loss: 0.0835 - val_acc: 0.9845\n",
"Epoch 11/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0263 - acc: 0.9923 - val_loss: 0.0929 - val_acc: 0.9829\n",
"Epoch 12/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0256 - acc: 0.9926 - val_loss: 0.1003 - val_acc: 0.9816\n",
"Epoch 13/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0252 - acc: 0.9932 - val_loss: 0.1114 - val_acc: 0.9826\n",
"Epoch 14/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0239 - acc: 0.9936 - val_loss: 0.1034 - val_acc: 0.9821acc: 0.9\n",
"Epoch 15/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0230 - acc: 0.9938 - val_loss: 0.1117 - val_acc: 0.98230.993 - ETA: 0s - loss: 0.0232 - acc: 0.99\n",
"Epoch 16/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0210 - acc: 0.9941 - val_loss: 0.0968 - val_acc: 0.9841\n",
"Epoch 17/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0185 - acc: 0.9951 - val_loss: 0.1044 - val_acc: 0.9829\n",
"Epoch 18/20\n",
"60000/60000 [==============================] - 9s - loss: 0.0200 - acc: 0.9947 - val_loss: 0.0968 - val_acc: 0.9842\n",
"Epoch 19/20\n",
"60000/60000 [==============================] - 10s - loss: 0.0182 - acc: 0.9956 - val_loss: 0.1225 - val_acc: 0.9816\n",
"Epoch 20/20\n",
"60000/60000 [==============================] - 10s - loss: 0.0195 - acc: 0.9953 - val_loss: 0.1110 - val_acc: 0.9818\n",
"Test loss: 0.11099309864256515\n",
"Test accuracy: 0.9818\n"
]
}
],
"source": [
"model.compile(loss='categorical_crossentropy',\n",
" optimizer=RMSprop(),\n",
" metrics=['accuracy'])\n",
"\n",
"history = model.fit(x_train, y_train,\n",
" batch_size=batch_size,\n",
" epochs=epochs,\n",
" verbose=1,\n",
" validation_data=(x_test, y_test))\n",
"score = model.evaluate(x_test, y_test, verbose=0)\n",
"print('Test loss:', score[0])\n",
"print('Test accuracy:', score[1])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Saving model to disk \n",
"\n",
"Saved model to disk\n"
]
}
],
"source": [
"from keras.models import load_model\n",
"#Save the model\n",
"# # serialize model to JSON\n",
"# model_digit_json = model.to_json()\n",
"# with open(\"model_digit.json\", \"w\") as json_file:\n",
"# json_file.write(model_digit_json)\n",
"# # serialize weights to HDF5\n",
"# model.save_weights(\"model_digit.h5\")\n",
"# model.save('model_digit.h5') # creates a HDF5 file 'my_model.h5'\n",
"# save model\n",
"print(\"Saving model to disk \\n\")\n",
"mp = \"model_digit.h5\"\n",
"model.save(mp)\n",
"\n",
"print(\"Saved model to disk\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"from keras.models import load_model\n",
"\n",
"\n",
"# load json and create model\n",
"\n",
"# json_file = open('model_digit.json', 'r')\n",
"\n",
"# loaded_model_json = json_file.read()\n",
"\n",
"# json_file.close()\n",
"\n",
"# loaded_model = model_from_json(loaded_model_json)\n",
"\n",
"# loaded_model.load_weights(\"model_digit.h5\")\n",
"\n",
"# print(\"Loaded model from disk\")\n",
"\n",
"model = load_model('model_digit.h5') #选取自己的.h模型名称\n",
"\n",
"image = cv2.imread('2.png')\n",
"img = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY) # RGB图像转为gray\n",
"\n",
"img1 = x_test[500]\n",
"\n",
"#需要用reshape定义出例子的个数图片的 通道数图片的长与宽。具体的参看keras文档\n",
"img = (img1.reshape(1,784)).astype('float32')/255 \n",
"predict = model.predict_classes(img)\n",
"print ('识别为:')\n",
"print (predict)\n",
"\n",
"cv2.imshow(\"Image1\", image)\n",
"cv2.waitKey(0)\n",
"\n",
"# %matplotlib inline\n",
"# import matplotlib\n",
"# import matplotlib.pyplot as plt\n",
"\n",
"# def plot_digit(data):\n",
"# image = data.reshape(28, 28)\n",
"# plt.imshow(image, cmap = matplotlib.cm.binary,\n",
"# interpolation=\"nearest\")\n",
"# plt.axis(\"off\")\n",
" \n",
"# some_index = 5000\n",
"\n",
"# plt.subplot(121) #1代表行2代表列所以一共有2个图1代表此时绘制第二个图。\n",
"# plot_digit(x_test[some_index])\n",
"# # plt.subplot(121); plot_digit(y_test[some_index])\n",
"# # save_fig(\"noisy_digit_example_plot\")\n",
"# plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}