Neural Networks and Machine Learning with TensorFlow, Part 4 (Multilayer Perceptron)

Author: miaozasnone | Published 2019-07-13 21:58

# coding=utf-8

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Download the MNIST dataset into the 'MNIST_data' folder and extract it
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
sess = tf.Session()


in_units = 784
h1_units = 300

# Hidden-layer weights w1 and bias b1: weights drawn from a truncated normal with stddev 0.1, bias initialized to zero
w1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))
b1 = tf.Variable(tf.zeros([h1_units]))
# Output-layer weights w2 and bias b2, both initialized to zero
# (w2 maps the h1_units hidden activations to the 10 output classes)
w2 = tf.Variable(tf.zeros([h1_units, 10]))
b2 = tf.Variable(tf.zeros([10]))


def run():
    # Start training
    init = tf.global_variables_initializer()
    sess.run(init)
    for i in range(3000):
        # Randomly draw 100 examples per step, i.e. mini-batch stochastic gradient descent (SGD)
        batch_xs, batch_ys = mnist.train.next_batch(100)
        # Run train_step, feeding the placeholders via feed_dict; keep 75% of hidden units during training
        sess.run(train_step, feed_dict={x: batch_xs, y_real: batch_ys, keep_prob: 0.75})

        if i % 100 == 0:
            # Evaluate the model every 100 training steps
            test()

def save():
    saver = tf.train.Saver()
    save_path = saver.save(sess, "/tmp/model.ckpt")
    print("Model saved in file: ", save_path)

def test():
    # Compare predictions with the true labels
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_real, 1))
    # Cast the boolean results to floats and average them to get the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Dropout is disabled at evaluation time (keep_prob = 1.0)
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_real: mnist.test.labels, keep_prob: 1.0}))
    # print(accuracy.eval({x: mnist.test.images, y_real: mnist.test.labels, keep_prob: 1.0}))

def restore_():
    saver = tf.train.Saver()
    saver.restore(sess, "/tmp/model.ckpt")

# Build the model
x = tf.placeholder(tf.float32, [None, in_units])

# Dropout keep probability, fed at run time (< 1.0 during training, 1.0 at test time)
keep_prob = tf.placeholder(tf.float32)

hidden1 = tf.nn.relu(tf.matmul(x, w1) + b1)
hidden1_drop = tf.nn.dropout(hidden1, keep_prob)
# Model prediction: softmax over the output layer, fed by the dropped-out hidden activations
y = tf.nn.softmax(tf.matmul(hidden1_drop, w2) + b2)
# True labels (one-hot)
y_real = tf.placeholder(tf.float32, [None, 10])

# Cross-entropy between the predictions and the true labels
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_real * tf.log(y), reduction_indices=[1]))
# cross_entropy = -tf.reduce_sum(y_real * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
# Minimize the cross-entropy with the Adagrad optimizer (learning rate 0.3)
train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)
#restore_()
#test()
run()
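
The save() and restore_() helpers are defined above but only appear in the commented-out calls. As a minimal sketch (not part of the original script), they could be combined with run() and test() like this, assuming the same graph, session, and checkpoint path /tmp/model.ckpt from the listing above:

# Illustrative sketch only: train, checkpoint, then reload and re-evaluate,
# using the sess, run, save, restore_, and test defined in this script.
run()        # train for 3000 mini-batches, printing test accuracy every 100 steps
save()       # write the variables to /tmp/model.ckpt
restore_()   # reload the checkpoint (normally done in a later run of the script)
test()       # accuracy should match the last value printed during training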