tf.truncated_normal — truncated normal distribution; the initialization most commonly used in TensorFlow examples
tf.random_normal — standard normal distribution
tf.random_uniform — uniform distribution
xavier_initializer() — the default initializer of tf.contrib.layers.fully_connected
Reference: http://www.tensorfly.cn/tfdoc/tutorials/mnist_pros.html
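A minimal side-by-side sketch of these four initializers (assuming TensorFlow 1.x with tf.contrib available; the [784, 256] shape is made up for illustration):
import tensorflow as tf
# Truncated normal: samples beyond two standard deviations are redrawn
w1 = tf.Variable(tf.truncated_normal([784, 256], stddev=0.1))
# Standard normal: no truncation, so occasional large values appear
w2 = tf.Variable(tf.random_normal([784, 256], stddev=0.1))
# Uniform over [minval, maxval)
w3 = tf.Variable(tf.random_uniform([784, 256], minval=-0.1, maxval=0.1))
# Xavier/Glorot: variance scaled by fan-in and fan-out
w4 = tf.get_variable("w4", [784, 256], initializer=tf.contrib.layers.xavier_initializer())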
kernel1 = tf.nn.conv2d(image_holder, filter=weight1, strides=[1, 1, 1, 1], padding='SAME')
bias1 = tf.Variable(tf.constant(0.0, shape=[64]))
# The following three statements are equivalent
conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
conv1 = tf.nn.relu(kernel1 + bias1)
conv1 = tf.nn.relu(tf.add(kernel1, bias1))
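A quick sketch verifying the equivalence on a made-up tensor (the [1, 4, 4, 64] shape below is hypothetical, chosen only for the check):
import numpy as np
import tensorflow as tf
kernel = tf.constant(np.random.rand(1, 4, 4, 64).astype(np.float32))
bias = tf.constant(0.5, shape=[64])
a = tf.nn.relu(tf.nn.bias_add(kernel, bias))
b = tf.nn.relu(kernel + bias)
c = tf.nn.relu(tf.add(kernel, bias))
with tf.Session() as sess:
    ra, rb, rc = sess.run([a, b, c])
    print(np.allclose(ra, rb), np.allclose(ra, rc))  # True True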
# Fully connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout layer
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Output layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
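Because keep_prob is fed at run time, dropout can be switched on for training and off for evaluation. A small self-contained sketch of how tf.nn.dropout behaves (kept units are scaled by 1/keep_prob, so the expected activation is unchanged):
import tensorflow as tf
x = tf.ones([1, 10])
keep_prob = tf.placeholder(tf.float32)
dropped = tf.nn.dropout(x, keep_prob)
with tf.Session() as sess:
    print(sess.run(dropped, feed_dict={keep_prob: 0.5}))  # a mix of 0.0 and 2.0
    print(sess.run(dropped, feed_dict={keep_prob: 1.0}))  # all ones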
Example
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 12:29:16 2018
@author: zy
"""
'''
Optimizing the convolution kernels to speed up computation
'''
'''
Build a convolutional neural network with a global average pooling layer and classify the CIFAR-10 dataset:
1. Use three 'SAME'-padded convolutional layers with 5x5 filters; each convolutional layer is followed by a max pooling layer with 2x2 filters and 2x2 strides
2. Apply global average pooling to the 10 output feature maps to obtain 10 features
3. Run softmax over the 10 features to obtain the classification
'''
import cifar10_input
import tensorflow as tf
import numpy as np
# Obtain variables via tf.get_variable
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights
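# A hypothetical usage sketch for the 'losses' collection (the shape, scope
# name, and weight decay factor below are made up for illustration):
#   regularizer = tf.contrib.layers.l2_regularizer(0.001)
#   with tf.variable_scope("layer1"):
#       w = get_weight_variable([784, 256], regularizer)
#   # fold the collected L2 penalties into the final training objective:
#   total_loss = data_loss + tf.add_n(tf.get_collection('losses'))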
def weight_variable(shape):
    '''
    Initialize weights
    args:
        shape: weight shape
    '''
    initial = tf.truncated_normal(shape=shape, mean=0.0, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    '''
    Initialize biases
    args:
        shape: bias shape
    '''
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    '''
    Convolution with 'SAME' padding. After the convolutional layer:
        out_height = ceil(in_height / stride_height)
        out_width  = ceil(in_width / stride_width)
    args:
        x: input image, shape [batch, in_height, in_width, in_channels]
        W: weights, shape [filter_height, filter_width, in_channels, out_channels]
    '''
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    '''
    Max pooling with 2x2 filters and 'SAME' padding. After the pooling layer:
        out_height = ceil(in_height / stride_height)
        out_width  = ceil(in_width / stride_width)
    args:
        x: input image, shape [batch, in_height, in_width, in_channels]
    '''
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def avg_pool_6x6(x):
    '''
    Global average pooling: pool with a filter the same size as the input feature map, 'SAME' padding. After the pooling layer:
        out_height = ceil(in_height / stride_height)
        out_width  = ceil(in_width / stride_width)
    args:
        x: input image, shape [batch, in_height, in_width, in_channels]
    '''
    return tf.nn.avg_pool(x, ksize=[1, 6, 6, 1], strides=[1, 6, 6, 1], padding='SAME')
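# A size-agnostic alternative sketch: global average pooling can also be
# written with tf.reduce_mean over the spatial axes, so the 6x6 input size is
# not hard-coded; the output shape matches avg_pool_6x6 ([batch, 1, 1, channels]).
def global_avg_pool(x):
    return tf.reduce_mean(x, axis=[1, 2], keepdims=True)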
def print_op_shape(t):
    '''
    Print the shape of an op node
    args:
        t: must be a tensor
        t.get_shape() returns a tuple; .as_list() converts it to a list
    '''
    print(t.op.name, '', t.get_shape().as_list())
'''
1. Load the dataset
'''
batch_size = 128
learning_rate = 1e-4
training_step = 15000
display_step = 200
# Dataset directory
data_dir = './cifar10_data/cifar-10-batches-bin'
print('begin')
# Get the training data
images_train,labels_train = cifar10_input.inputs(eval_data=False,data_dir = data_dir,batch_size=batch_size)
print('begin data')
'''
2. Define the network structure
'''
# Define placeholders
input_x = tf.placeholder(dtype=tf.float32, shape=[None, 24, 24, 3])  # image size 24x24x3
input_y = tf.placeholder(dtype=tf.float32, shape=[None, 10])  # classes 0-9
x_image = tf.reshape(input_x,[batch_size,24,24,3])
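# Note: hard-coding batch_size in the reshape ties the graph to 128-image
# batches. Since input_x is already declared as [None, 24, 24, 3], an
# equivalent batch-size-agnostic form would be:
#   x_image = tf.reshape(input_x, [-1, 24, 24, 3])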
# 1. Convolutional layer -> pooling layer
W_conv1 = weight_variable([5, 5, 3, 64])
b_conv1 = bias_variable([64])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output [-1,24,24,64]
print_op_shape(h_conv1)
h_pool1 = max_pool_2x2(h_conv1)  # output [-1,12,12,64]
print_op_shape(h_pool1)
# 2. Convolutional layer -> pooling layer, with optimized (factorized) kernels:
# the 5x5 convolution is split into a 5x1 followed by a 1x5 to reduce computation
W_conv21 = weight_variable([5, 1, 64, 64])
b_conv21 = bias_variable([64])
h_conv21 = tf.nn.relu(conv2d(h_pool1, W_conv21) + b_conv21)  # output [-1,12,12,64]
print_op_shape(h_conv21)
W_conv2 = weight_variable([1, 5, 64, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_conv21, W_conv2) + b_conv2)  # output [-1,12,12,64]
print_op_shape(h_conv2)
h_pool2 = max_pool_2x2(h_conv2)  # output [-1,6,6,64]
print_op_shape(h_pool2)
# 3. Convolutional layer -> global average pooling layer
W_conv3 = weight_variable([5, 5, 64, 10])
b_conv3 = bias_variable([10])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)  # output [-1,6,6,10]
print_op_shape(h_conv3)
nt_hpool3 = avg_pool_6x6(h_conv3)  # output [-1,1,1,10]
print_op_shape(nt_hpool3)
nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 10])
y_conv = tf.nn.softmax(nt_hpool3_flat)
'''
3. Define the optimizer
'''
# Softmax cross-entropy cost function
cost = tf.reduce_mean(-tf.reduce_sum(input_y * tf.log(y_conv), axis=1))
# Optimizer
train = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Per-example correctness
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(input_y, 1))
# Accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
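# The cost above applies tf.log to the softmax output, which produces NaN once
# y_conv underflows to 0. A numerically safer sketch reuses the raw logits
# nt_hpool3_flat and lets TensorFlow fuse the softmax with the log:
#   cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
#       labels=input_y, logits=nt_hpool3_flat))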
'''
4. Start training
'''
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Start all queue-runner threads in the graph: tf.train.start_queue_runners fills the filename queue; without it, read ops block until the filename queue has entries.
tf.train.start_queue_runners(sess=sess)
for step in range(training_step):
    # Fetch a batch_size-sized batch
    image_batch, label_batch = sess.run([images_train, labels_train])
    # One-hot encode the labels
    label_b = np.eye(10, dtype=np.float32)[label_batch]
    # Run one training step
    train.run(feed_dict={input_x: image_batch, input_y: label_b}, session=sess)
    if step % display_step == 0:
        train_accuracy = accuracy.eval(feed_dict={input_x: image_batch, input_y: label_b}, session=sess)
        print('Step {0} training accuracy {1}'.format(step, train_accuracy))
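# The loop above only reports accuracy on training batches. A sketch of
# checking the test split with the same helper (assumes eval_data=True yields
# the CIFAR-10 test set; build this pipeline BEFORE calling
# tf.train.start_queue_runners so its queue threads are started too):
#   images_test, labels_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)
#   image_batch, label_batch = sess.run([images_test, labels_test])
#   label_b = np.eye(10, dtype=np.float32)[label_batch]
#   print('test accuracy {0}'.format(accuracy.eval(feed_dict={input_x: image_batch, input_y: label_b}, session=sess)))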