View the data
from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt

(train_x, train_y), (test_x, test_Y) = mnist.load_data()

plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_x[i], cmap=plt.cm.binary)
    plt.xlabel(train_y[i])
plt.show()
[Figure: the first 25 training images with their labels]
Preprocess the data: normalization
train_x = train_x.reshape((-1, 28, 28, 1)) / 255.0
test_x = test_x.reshape((-1, 28, 28, 1)) / 255.0
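A quick sanity check (a minimal sketch, not part of the original run) confirms the shapes and value range after reshaping and dividing by 255:

print(train_x.shape, test_x.shape)    # (60000, 28, 28, 1) (10000, 28, 28, 1)
print(train_x.min(), train_x.max())   # 0.0 1.0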
A fully connected model
import tensorflow as tf

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
# multi_gpu_model (deprecated) was imported here but left unused in this run:
# from tensorflow.keras.utils import multi_gpu_model
# model = multi_gpu_model(model, gpus=2)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_x, train_y, epochs=5, batch_size=64,
                    validation_data=(test_x, test_Y))
print(history.history)
Train on 60000 samples, validate on 10000 samples
Epoch 1/5
60000/60000 [==============================] - 3s 44us/step - loss: 0.2453 - acc: 0.9292 - val_loss: 0.1210 - val_acc: 0.9645
Epoch 2/5
60000/60000 [==============================] - 2s 39us/step - loss: 0.1045 - acc: 0.9685 - val_loss: 0.0873 - val_acc: 0.9742
Epoch 3/5
60000/60000 [==============================] - 2s 39us/step - loss: 0.0734 - acc: 0.9772 - val_loss: 0.0760 - val_acc: 0.9754
Epoch 4/5
60000/60000 [==============================] - 2s 39us/step - loss: 0.0544 - acc: 0.9827 - val_loss: 0.0626 - val_acc: 0.9808
Epoch 5/5
60000/60000 [==============================] - 2s 39us/step - loss: 0.0431 - acc: 0.9865 - val_loss: 0.0606 - val_acc: 0.9809
{'val_loss': [0.12101827680170536, 0.08727486125305295, 0.07602117696609348, 0.06258633336527274, 0.06062790886806324], 'val_acc': [0.9645, 0.9742, 0.9754, 0.9808, 0.9809], 'loss': [0.24529895100792248, 0.10447835950454076, 0.07340832727799813, 0.05438600577091177, 0.04305711637934049], 'acc': [0.9291833333333334, 0.96845, 0.9772, 0.9827166666666667, 0.9864833333333334]}
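To get a single test-set number instead of reading the last epoch's log, model.evaluate runs the compiled metrics over the test data. A minimal sketch; since validation_data above was the test set, this should reproduce the final val_acc of roughly 0.981:

test_loss, test_acc = model.evaluate(test_x, test_Y)
print('test loss:', test_loss, 'test accuracy:', test_acc)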
A convolutional neural network model
import tensorflow as tf
from tensorflow.keras.utils import multi_gpu_model  # deprecated; see the tf.distribute sketch after the training log

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, kernel_size=(5, 5), input_shape=(28, 28, 1),
                           activation='relu', name='conv2d-32'),
    tf.keras.layers.MaxPool2D(strides=(2, 2), name='MaxPool2D-32'),
    tf.keras.layers.Conv2D(64, kernel_size=(5, 5), activation='relu', name='Conv2D-64'),
    tf.keras.layers.MaxPool2D(strides=(2, 2), name='MaxPool2D-64'),
    tf.keras.layers.Flatten(name='Flatten'),
    tf.keras.layers.Dropout(0.5, name='Dropout'),
    tf.keras.layers.Dense(10, activation='softmax', name='Dense-10')
])
model.summary()
model = multi_gpu_model(model, gpus=2)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_x, train_y, epochs=10, validation_data=(test_x, test_Y))
print(history.history)
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d-32 (Conv2D)           (None, 24, 24, 32)        832
_________________________________________________________________
MaxPool2D-32 (MaxPooling2D)  (None, 12, 12, 32)        0
_________________________________________________________________
Conv2D-64 (Conv2D)           (None, 8, 8, 64)          51264
_________________________________________________________________
MaxPool2D-64 (MaxPooling2D)  (None, 4, 4, 64)          0
_________________________________________________________________
Flatten (Flatten)            (None, 1024)               0
_________________________________________________________________
Dropout (Dropout)            (None, 1024)               0
_________________________________________________________________
Dense-10 (Dense)             (None, 10)                 10250
=================================================================
Total params: 62,346
Trainable params: 62,346
Non-trainable params: 0
_________________________________________________________________
Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 10s 172us/step - loss: 0.1965 - acc: 0.9391 - val_loss: 0.0442 - val_acc: 0.9869
Epoch 2/10
60000/60000 [==============================] - 10s 160us/step - loss: 0.0712 - acc: 0.9780 - val_loss: 0.0314 - val_acc: 0.9904
Epoch 3/10
60000/60000 [==============================] - 10s 161us/step - loss: 0.0527 - acc: 0.9835 - val_loss: 0.0254 - val_acc: 0.9921
Epoch 4/10
60000/60000 [==============================] - 10s 161us/step - loss: 0.0439 - acc: 0.9862 - val_loss: 0.0216 - val_acc: 0.9928
Epoch 5/10
60000/60000 [==============================] - 10s 161us/step - loss: 0.0376 - acc: 0.9881 - val_loss: 0.0246 - val_acc: 0.9922
Epoch 6/10
60000/60000 [==============================] - 10s 160us/step - loss: 0.0353 - acc: 0.9888 - val_loss: 0.0188 - val_acc: 0.9937
Epoch 7/10
60000/60000 [==============================] - 10s 161us/step - loss: 0.0308 - acc: 0.9902 - val_loss: 0.0202 - val_acc: 0.9939
Epoch 8/10
60000/60000 [==============================] - 10s 161us/step - loss: 0.0291 - acc: 0.9911 - val_loss: 0.0235 - val_acc: 0.9923
Epoch 9/10
60000/60000 [==============================] - 10s 161us/step - loss: 0.0265 - acc: 0.9913 - val_loss: 0.0200 - val_acc: 0.9940
Epoch 10/10
60000/60000 [==============================] - 10s 161us/step - loss: 0.0259 - acc: 0.9917 - val_loss: 0.0228 - val_acc: 0.9918
{'val_loss': [0.04419064577044919, 0.031431237593130205, 0.025389306041694364, 0.02164296607077413, 0.024627994345039768, 0.01881986585248378, 0.02021861595758237, 0.023530831995893096, 0.020035049462070312, 0.022779195395996793], 'val_acc': [0.9869, 0.9904, 0.9921, 0.9928, 0.9922, 0.9937, 0.9939, 0.9923, 0.994, 0.9918], 'loss': [0.1964585481060048, 0.07116204217337072, 0.05266052842771945, 0.04390566666952024, 0.037550158609023006, 0.03534947692871792, 0.030789569879024446, 0.029085736439652586, 0.026527962450721924, 0.025876243178133154], 'acc': [0.9391166666666667, 0.97805, 0.9835, 0.9862166666666666, 0.9880666666666666, 0.9887666666666667, 0.9902, 0.9910833333333333, 0.99135, 0.9917333333333334]}
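A note on multi_gpu_model: it was deprecated and later removed from TensorFlow. On TF 2.x the replacement is tf.distribute.MirroredStrategy, where the model is built and compiled inside the strategy scope. A minimal sketch with a smaller illustrative layer list, not the exact network above:

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()   # one replica per visible GPU
with strategy.scope():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(32, (5, 5), activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPool2D(strides=(2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
# model.fit(...) is then called exactly as before; each batch is split across the replicas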
Visualize the model
from IPython.display import SVG
from tensorflow.keras.utils import model_to_dot  # requires pydot and graphviz

SVG(model_to_dot(model).create(prog='dot', format='svg'))
[Figure: model architecture graph rendered by model_to_dot]
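If an image file is enough and inline SVG is not needed, tf.keras.utils.plot_model writes the same graph to disk; show_shapes=True additionally prints each layer's output shape:

from tensorflow.keras.utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True)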
Plot the training curves
import matplotlib.pyplot as plt

history_dict = history.history
acc = history_dict['acc']        # on newer TF/Keras the key is 'accuracy'
loss = history_dict['loss']
val_acc = history_dict['val_acc']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)

# 'bo' draws blue dots, 'b' draws a solid blue line
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

plt.clf()  # clear the figure before the accuracy plot
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
[Figure: training and validation loss]
[Figure: training and validation accuracy]
Check the predictions
import numpy as np

plt.figure(figsize=(10, 10))
p1_label = model.predict(test_x[0:49])     # class probabilities, shape (49, 10)
p_label = np.argmax(p1_label, axis=1)      # predicted digit
p_max_labels = np.max(p1_label, axis=1)    # confidence of the predicted digit
for i in range(49):
    plt.subplot(7, 7, i + 1)
    plt.imshow(test_x[i].reshape(28, 28))
    plt.xticks([])
    plt.yticks([])
    if p_label[i] == test_Y[i]:
        color = 'blue'
    else:
        color = 'red'
    plt.xlabel("{} {:2.1f}% ({})".format(p_label[i],
                                         100 * p_max_labels[i],
                                         test_Y[i]), color=color)
[Figure: 7x7 grid of test images labeled "predicted digit, confidence, (true digit)"]
See which ones it got wrong
p1 = model.predict(test_x)               # probabilities for the whole test set
p_label = np.argmax(p1, axis=1)
result = np.equal(p_label, test_Y)
index_wrong = np.where(~result)          # indices of the misclassified images
print(index_wrong)

x = test_x[index_wrong]
right_y = test_Y[index_wrong]
p_wrong = model.predict(x)
wrong_y = np.argmax(p_wrong, axis=1)     # the (wrong) predicted digit
wrong_max = np.max(p_wrong, axis=1)      # its confidence
print(wrong_y)
print(len(right_y))
print(wrong_max)

plt.figure(figsize=(12, 12))
for i in range(49):
    plt.subplot(7, 7, i + 1)
    plt.imshow(x[i].reshape(28, 28))
    plt.xticks([])
    plt.yticks([])
    if wrong_y[i] == right_y[i]:
        color = 'blue'
    else:
        color = 'red'   # every image shown here is a mistake, so the labels are red
    plt.xlabel("{} {:2.1f}% ({})".format(wrong_y[i],
                                         100 * wrong_max[i],
                                         right_y[i]), color=color)
(array([ 320, 340, 445, 449, 659, 684, 846, 947, 1014, 1039, 1112,
1138, 1226, 1232, 1242, 1247, 1260, 1522, 1621, 1709, 1901, 2035,
2070, 2130, 2293, 2329, 2414, 2447, 2462, 2488, 2597, 2654, 2770,
2896, 2927, 2939, 2995, 3030, 3225, 3384, 3422, 3520, 3534, 3558,
3730, 4027, 4176, 4400, 4731, 4761, 4807, 4814, 4823, 5937, 5997,
6172, 6569, 6576, 6597, 6625, 6651, 8408, 9015, 9642, 9664, 9679,
9698, 9729, 9770]),)
[8 3 0 5 1 2 9 9 5 1 6 1 2 4 9 5 1 9 6 5 4 3 9 9 6 2 4 9 0 4 3 1 5 0 2 5 8
0 9 6 0 4 8 0 9 1 7 1 2 8 0 0 4 3 9 5 2 1 7 2 8 5 2 7 7 2 5 6 0]
69
[0.5487968 0.7154917 0.9463902 0.95204204 0.6993093 0.61079836
0.9542614 0.789387 0.99403703 0.8320868 0.9761881 0.59718984
0.7508293 0.8380351 0.7072604 0.9447838 0.625205 0.66415906
0.5856947 0.95903456 0.85126615 0.9803823 0.6441936 0.9830293
0.7776802 0.8865479 0.7413854 0.5177151 0.71139973 0.7475217
0.9774415 0.9998882 0.7372158 0.9488201 0.6718568 0.8249474
0.5528 0.54504114 0.54805565 0.62762654 0.99735606 0.9991032
0.65194035 0.9706246 0.6190088 0.4959286 0.8655964 0.6018419
0.5978997 0.71149963 0.68390083 0.7572602 0.6328306 0.903725
0.5591738 0.5529782 0.6141367 0.96959394 0.6757054 0.54540163
0.9384903 0.4220014 0.6736965 0.5338367 0.58165085 0.41066182
0.47381133 0.9988312 0.58307064]
[Figure: 7x7 grid of misclassified test images labeled "predicted digit, confidence, (true digit)"]
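Beyond eyeballing individual mistakes, a confusion matrix shows which digits get confused with which. A minimal numpy-only sketch, reusing p_label and test_Y from above (not part of the original notebook):

cm = np.zeros((10, 10), dtype=int)   # rows: true digit, columns: predicted digit
for t, p in zip(test_Y, p_label):
    cm[t, p] += 1
print(cm)                            # off-diagonal entries count the errors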