1. Single-layer neural network
# Import the MNIST dataset via TensorFlow
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# Define placeholders for the images and the one-hot labels
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
# Build the neural network: a single softmax layer
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Quadratic cost function
loss = tf.reduce_mean(tf.square(y_ - y))
# Gradient descent
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Initialize the variables
init = tf.global_variables_initializer()
# Compute the accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(init)
    # 21 passes over the 55,000 training examples
    for n in range(21):
        for i in range(550):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
        print("Iter " + str(n) + ",Testing Accuracy " + str(acc))
- With the code above, the model reaches about 92% accuracy on the test set on average.
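One common refinement, not used in the code above, is to replace the quadratic cost with a cross-entropy loss, which generally pairs better with a softmax output and converges faster. A minimal sketch, reusing x, y_, W, and b as defined above:

# Keep the raw logits so the numerically stable fused op can be used
logits = tf.matmul(x, W) + b
y = tf.nn.softmax(logits)
# Cross-entropy computed from the logits (more stable than taking log(y) by hand)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

The rest of the training loop is unchanged; with a single linear layer the final accuracy still plateaus near 92%, but the loss typically drops faster in the early epochs.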
2. Adding a hidden layer
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# Size of each mini-batch
batch_size = 100
# Number of batches per epoch
n_batch = mnist.train.num_examples // batch_size
# Define the placeholders
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
#keep_prob = tf.placeholder(tf.float32)
# Build a neural network with one hidden layer
w1 = tf.Variable(tf.truncated_normal([784, 300], stddev=0.1))
w2 = tf.Variable(tf.zeros([300, 10]))
b1 = tf.Variable(tf.zeros([300]))
b2 = tf.Variable(tf.zeros([10]))
L1 = tf.nn.relu(tf.matmul(x, w1) + b1)
y = tf.nn.softmax(tf.matmul(L1, w2) + b2)
# Quadratic cost function
loss = tf.reduce_mean(tf.square(y_ - y))
# Gradient descent
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Initialize the variables
init = tf.global_variables_initializer()
# Store the comparison results in a list of booleans
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
- With everything else unchanged, simply adding one hidden layer reaches roughly 95% accuracy after 21 training epochs, and close to 98% after 200 epochs.
- In fact, once this softmax regression gains a hidden layer and becomes a proper neural network, combining it with techniques such as Dropout, Adagrad, and ReLU can push accuracy to 98% (a minimal sketch follows below). Adding convolutional and pooling layers brings it to 99%, and current state-of-the-art methods based on convolutional neural networks already reach 99.8%.
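As a concrete illustration of those first improvements, here is a minimal sketch that adds Dropout on the hidden layer and swaps the optimizer for Adagrad. It reuses x, y_, w1, b1, w2, b2, and accuracy from the hidden-layer code above and uncomments the keep_prob placeholder; the 0.75/1.0 keep rates and the 0.3 learning rate are illustrative choices, not values from the book:

keep_prob = tf.placeholder(tf.float32)
L1 = tf.nn.relu(tf.matmul(x, w1) + b1)
# Randomly drop hidden activations during training to reduce overfitting
L1_drop = tf.nn.dropout(L1, keep_prob)
y = tf.nn.softmax(tf.matmul(L1_drop, w2) + b2)
loss = tf.reduce_mean(tf.square(y_ - y))
# Adagrad adapts the step size per parameter as training progresses
train_step = tf.train.AdagradOptimizer(0.3).minimize(loss)
# Train with dropout enabled:
#   sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.75})
# Evaluate with dropout disabled (keep every activation):
#   acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})

Note that every sess.run touching y must now also feed keep_prob: a value below 1.0 while training, and 1.0 at test time.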
Reference: 《TensorFlow实战》