Building the CNN Model (Part 2)
Continuing from last week's unfinished work:
# Placeholder for the 400-dimensional input vectors; in the CNN each one is a 20 * 20 matrix
xs = tf.placeholder(tf.float32, [None, 400])
# Placeholder for the 3-dimensional output vectors: "good", "average", "poor"
ys = tf.placeholder(tf.float32, [None, 3])
# keep_prob is the probability that a neuron is kept. It is declared as a placeholder
# and fed a concrete value at run time; it drives the dropout strategy, which mitigates overfitting
keep_prob = tf.placeholder(tf.float32)
# Reshape the flat input into the image layout the convolutional layers expect (400 = 20 * 20)
x_input = tf.reshape(xs, [-1, 20, 20, 1])
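The snippets in this post call four helpers (weight_variable, bias_variable, conv2d and max_pool_2x2) whose definitions are not repeated here. A minimal sketch of how they are conventionally written in TensorFlow 1.x, following the classic MNIST tutorial (the stddev of 0.1 and the 0.1 bias constant are assumptions):

import tensorflow as tf

def weight_variable(shape):
    # Small truncated-normal initialization breaks symmetry between filters
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # A slightly positive bias keeps ReLU units active early in training
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # Stride 1 with SAME padding preserves the spatial size
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # A 2x2 window with stride 2 halves both spatial dimensions
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')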
Designing the convolutional layers:
# Layer 1: convolutional layer
# 5 * 5 kernels, 1 input channel, 24 output channels
W_conv1 = weight_variable([5, 5, 1, 24])
b_conv1 = bias_variable([24])
# Apply the ReLU activation, max(features, 0): negative entries are set to 0, positive entries are unchanged
# out size 20 * 20 * 24
h_conv1 = tf.nn.relu(conv2d(x_input, W_conv1) + b_conv1)
# Layer 2: max-pooling layer
# out size 10 * 10 * 24
h_pool1 = max_pool_2x2(h_conv1)
# Layer 3: convolutional layer
# 5 * 5 kernels, 24 input channels, 48 output channels
W_conv2 = weight_variable([5, 5, 24, 48])
b_conv2 = bias_variable([48])
# Apply the ReLU activation, max(features, 0): negative entries are set to 0, positive entries are unchanged
# out size 10 * 10 * 48
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# Layer 4: max-pooling layer
# out size 5 * 5 * 48
h_pool2 = max_pool_2x2(h_conv2)
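The shape chain can be sanity-checked by printing each tensor's static shape: the SAME-padded convolutions preserve the 20 * 20 and 10 * 10 spatial sizes, and each 2x2 pool halves them.

print(h_conv1.get_shape())  # (?, 20, 20, 24)
print(h_pool1.get_shape())  # (?, 10, 10, 24)
print(h_conv2.get_shape())  # (?, 10, 10, 48)
print(h_pool2.get_shape())  # (?, 5, 5, 48)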
Building the fully connected layer:
# Layer 5: fully connected layer
# The pooled feature maps flatten to 5 * 5 * 48 = 1200 values per sample
W_fc1 = weight_variable([5 * 5 * 48, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 5 * 5 * 48])
# tf.matmul performs matrix multiplication
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
Apply the softmax function to output the predicted label:
# Layer 6: softmax layer, which outputs the predicted label
W_fc2 = weight_variable([1024, 3])
b_fc2 = bias_variable([3])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
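The training loop below relies on a train_step op and a session that are defined outside this excerpt. A minimal sketch of one conventional way to set them up, assuming a cross-entropy loss and an Adam optimizer (the 1e-4 learning rate, the choice of Adam, and the 1e-10 safeguard against log(0) are all assumptions):

# Cross-entropy between the one-hot labels ys and the softmax output
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(ys * tf.log(prediction + 1e-10), reduction_indices=[1]))
# Adam with a 1e-4 learning rate is an assumed choice, not taken from the original
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.global_variables_initializer())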
# Train for m epochs, printing the accuracy once every n epochs
for i in range(m):
    step = 0
    # Walk through the training set one mini-batch at a time
    while (step + 1) * batch <= len(training_xs):
        temp_xs = []
        temp_ys = []
        for k in range(batch):
            temp_xs.append(training_xs[step * batch + k])
            temp_ys.append(training_ys[step * batch + k])
        step = step + 1
        # keep_prob = 0.5 enables dropout while training
        sess.run(train_step, feed_dict={xs: temp_xs, ys: temp_ys, keep_prob: 0.5})
    if i % n == 0:
        # Evaluate on the held-out test set
        print(compute_accuracy(test_xs, test_ys))
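compute_accuracy is likewise defined outside this excerpt. A minimal sketch consistent with the graph above; note that keep_prob is fed as 1.0 so dropout is disabled during evaluation:

def compute_accuracy(v_xs, v_ys):
    # Forward pass with dropout switched off
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1.0})
    # A sample counts as correct when the arg-max class matches the one-hot label
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1.0})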