In the Keras source, MNIST is downloaded via path = get_file(path, origin='https://s3.amazonaws.com/img-datasets/mnist.npz'), i.e. the data is fetched from the URL https://s3.amazonaws.com/img-datasets/mnist.npz. That URL is blocked in mainland China, so every MNIST example stalls at the download step. This post provides a workaround so readers can still run the example code end to end.
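For reference, the loader in the Keras source looks roughly like the sketch below (simplified from older Keras releases; your installed version may differ slightly):

def load_data(path='mnist.npz'):
    # get_file() downloads from the S3 URL and caches the result locally
    path = get_file(path, origin='https://s3.amazonaws.com/img-datasets/mnist.npz')
    f = np.load(path)
    x_train, y_train = f['x_train'], f['y_train']
    x_test, y_test = f['x_test'], f['y_test']
    f.close()
    return (x_train, y_train), (x_test, y_test)

Since get_file() is what issues the blocked request, the fix is simply to obtain mnist.npz by other means and load it with numpy directly.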
Download the mnist.npz dataset
The mnist.npz file used in this post was downloaded through a server in Japan and is shared here for free. If you have trouble downloading it, leave a comment.
Download link: https://pan.baidu.com/s/1jH6uFFC (password: dw3d)
After downloading, place the file in the same directory as your *.py script.
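Alternatively, if you would rather not modify the code at all: keras.utils.get_file caches downloads under ~/.keras/datasets/, so copying the file there should let the original mnist.load_data() call find it and skip the download. This is a sketch assuming the default cache location (newer Keras versions may additionally verify a file hash):

import os, shutil
# Assumed default Keras cache directory; adjust if you changed KERAS_HOME.
cache_dir = os.path.join(os.path.expanduser('~'), '.keras', 'datasets')
os.makedirs(cache_dir, exist_ok=True)
shutil.copy('./mnist.npz', os.path.join(cache_dir, 'mnist.npz'))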
Modify the original code
# Load the data locally instead of calling mnist.load_data()
#(X_train, y_train), (X_test, y_test) = mnist.load_data()
import numpy as np
path = './mnist.npz'
f = np.load(path)  # .npz archives expose their arrays by name
X_train, y_train = f['x_train'], f['y_train']
X_test, y_test = f['x_test'], f['y_test']
f.close()
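A quick sanity check after loading; the standard MNIST split has 60,000 training and 10,000 test images of 28x28 pixels:

print(X_train.shape, y_train.shape)  # expected: (60000, 28, 28) (60000,)
print(X_test.shape, y_test.shape)    # expected: (10000, 28, 28) (10000,)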
Running the modified script produces output like this:
45568/48000 [===========================>..] - ETA: 4s - loss: 0.0574 - acc: 0.9818
45696/48000 [===========================>..] - ETA: 4s - loss: 0.0576 - acc: 0.9817
45824/48000 [===========================>..] - ETA: 4s - loss: 0.0576 - acc: 0.9817
45952/48000 [===========================>..] - ETA: 3s - loss: 0.0576 - acc: 0.9817
46080/48000 [===========================>..] - ETA: 3s - loss: 0.0576 - acc: 0.9817
46208/48000 [===========================>..] - ETA: 3s - loss: 0.0577 - acc: 0.9817
46336/48000 [===========================>..] - ETA: 3s - loss: 0.0576 - acc: 0.9817
46464/48000 [============================>.] - ETA: 2s - loss: 0.0578 - acc: 0.9817
46592/48000 [============================>.] - ETA: 2s - loss: 0.0577 - acc: 0.9817
46720/48000 [============================>.] - ETA: 2s - loss: 0.0578 - acc: 0.9817
46848/48000 [============================>.] - ETA: 2s - loss: 0.0578 - acc: 0.9817
46976/48000 [============================>.] - ETA: 1s - loss: 0.0579 - acc: 0.9817
47104/48000 [============================>.] - ETA: 1s - loss: 0.0580 - acc: 0.9816
47232/48000 [============================>.] - ETA: 1s - loss: 0.0580 - acc: 0.9816
47360/48000 [============================>.] - ETA: 1s - loss: 0.0580 - acc: 0.9817
47488/48000 [============================>.] - ETA: 0s - loss: 0.0579 - acc: 0.9817
47616/48000 [============================>.] - ETA: 0s - loss: 0.0578 - acc: 0.9817
47744/48000 [============================>.] - ETA: 0s - loss: 0.0578 - acc: 0.9817
47872/48000 [============================>.] - ETA: 0s - loss: 0.0577 - acc: 0.9818
48000/48000 [==============================] - 100s 2ms/step - loss: 0.0577 - acc: 0.9817 - val_loss: 0.0447 - val_acc: 0.9862
Test score: 0.03886812744811614
Test accuracy: 0.9869
The complete code is as follows:
# -*- coding: utf-8 -*-
__author__ = "TF大Q"
import numpy as np
np.random.seed(1337)  # for reproducibility
from keras.datasets import mnist  # unused now that we load the .npz manually
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
# Global hyperparameters
batch_size = 128
nb_classes = 10
epochs = 5
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolution filters
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
# Load the data locally instead of calling mnist.load_data()
#(X_train, y_train), (X_test, y_test) = mnist.load_data()
path = './mnist.npz'
f = np.load(path)
X_train, y_train = f['x_train'], f['y_train']
X_test, y_test = f['x_test'], f['y_test']
f.close()
#from sklearn.cross_validation import train_test_split  # renamed in newer scikit-learn
from sklearn.model_selection import train_test_split
# X holds the features and y the labels; hold out 20% of the training data for validation.
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.2)
# Arrange the input shape according to the backend's image data format
if K.image_data_format() == 'channels_first':  # image_dim_ordering() == 'th' in older Keras
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    X_valid = X_valid.reshape(X_valid.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    X_valid = X_valid.reshape(X_valid.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Cast to float32 and scale pixel values to [0, 1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_valid = X_valid.astype('float32')
X_train /= 255
X_test /= 255
X_valid /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
print(X_valid.shape[0], 'valid samples')
# Convert labels to one-hot encoding
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
Y_valid = np_utils.to_categorical(y_valid, nb_classes)
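# For example, np_utils.to_categorical([3], 10) yields
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]] -- one row per label, one column per class.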
# Build the model
model = Sequential()
model.add(Convolution2D(nb_filters, (kernel_size[0], kernel_size[1]),
                        padding='same',
                        input_shape=input_shape))  # convolution layer 1
model.add(Activation('relu'))  # activation
model.add(Convolution2D(nb_filters, (kernel_size[0], kernel_size[1])))  # convolution layer 2
model.add(Activation('relu'))  # activation
model.add(MaxPooling2D(pool_size=pool_size))  # max-pooling layer
model.add(Dropout(0.25))  # randomly drop units during training
model.add(Flatten())  # flatten to 1-D
model.add(Dense(128))  # fully connected layer 1
model.add(Activation('relu'))  # activation
model.add(Dropout(0.5))  # dropout
model.add(Dense(nb_classes))  # fully connected layer 2
model.add(Activation('softmax'))  # softmax class scores
# Compile the model
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
# Train the model
model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs,
          verbose=1, validation_data=(X_valid, Y_valid))
# Evaluate on the held-out test set
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
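If you are on a newer Keras (or tf.keras), several of the names above have been renamed. A rough mapping, assuming a recent TensorFlow install (adjust to your version):

from tensorflow.keras.layers import Conv2D          # replaces Convolution2D
from tensorflow.keras.utils import to_categorical   # replaces np_utils.to_categorical
from tensorflow.keras import backend as K
print(K.image_data_format())  # 'channels_last' or 'channels_first'; replaces image_dim_ordering()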