This is a programming assignment from Andrew Ng's DeepLearning course, rewritten in TensorFlow; it also doubles as a record of my own TensorFlow learning process. The assignment is:
Planar data classification with one hidden layer
from planar_utils import load_planar_dataset  # data-loading helper that ships with the assignment
import tensorflow as tf
# Forward propagation
def forward_propagation(input_tensor, W1, b1, W2, b2):
    layer1 = tf.nn.tanh(tf.matmul(input_tensor, W1) + b1)   # hidden layer: tanh activation
    layer2 = tf.nn.sigmoid(tf.matmul(layer1, W2) + b2)      # output layer: sigmoid, gives P(y=1)
    return layer2
# Accuracy: threshold the sigmoid outputs at 0.5 and compare with the labels
def evaluate(labels, predictions):
    ones = tf.ones_like(predictions)
    zeros = tf.zeros_like(predictions)
    predictions = tf.where(predictions < 0.5, x=zeros, y=ones)   # below 0.5 -> 0, otherwise -> 1
    labels = tf.cast(labels, tf.float32)
    acc = tf.reduce_mean(tf.cast(tf.equal(labels, predictions), tf.float32))
    return acc
labels holds the ground-truth values, either 0 or 1; predictions holds the model outputs, which lie between 0 and 1, so they have to be converted to 0 or 1 before the comparison: values below 0.5 become 0 and values of 0.5 or above become 1.
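For instance, a minimal check of evaluate() with made-up values (my own example, not part of the assignment):

import numpy as np

# Hypothetical example: 4 samples, one prediction on the wrong side of 0.5 -> accuracy 0.75
labels_demo = np.array([[1.], [0.], [1.], [0.]], dtype=np.float32)
preds_demo = np.array([[0.9], [0.4], [0.3], [0.2]], dtype=np.float32)
with tf.Session() as sess:
    print(sess.run(evaluate(labels_demo, preds_demo)))   # 0.75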
# Model training
def train(X, Y, n_h, num_iteration=10000, learning_rate=1.2):
    n_x = X.shape[1]   # number of input features
    n_y = Y.shape[1]   # number of output units
    x = tf.placeholder(tf.float32, [None, n_x], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, n_y], name='y-input')
    # small random weights, zero biases
    W1 = tf.Variable(tf.random_normal([n_x, n_h], stddev=0.01, seed=2))
    b1 = tf.Variable(tf.zeros([n_h]))
    W2 = tf.Variable(tf.random_normal([n_h, n_y], stddev=0.01, seed=2))
    b2 = tf.Variable(tf.zeros([n_y]))
    y = forward_propagation(x, W1, b1, W2, b2)
    # binary cross-entropy; clip_by_value keeps log() away from 0
    loss = -tf.reduce_mean(tf.log(tf.clip_by_value(y, 1e-10, 1.0)) * y_
                           + tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)) * (1 - y_))
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    train_acc = evaluate(Y, y)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(num_iteration):
            sess.run(train_step, feed_dict={x: X, y_: Y})
            if i % 1000 == 0:
                total_loss = sess.run(loss, feed_dict={x: X, y_: Y})
                # fetch the current parameters (not used further here)
                w1 = sess.run(W1)
                w2 = sess.run(W2)
                total_b1 = sess.run(b1)
                total_b2 = sess.run(b2)
                acc = sess.run(train_acc, feed_dict={x: X, y_: Y})
                print('Cost after iteration %i: %f, accuracy is %f' % (i, total_loss, acc))
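A side note on the loss: the clip_by_value calls only guard log() against zero. An alternative sketch (my own variant, not the assignment's code; forward_propagation_logits and the commented replacement lines are hypothetical) is to keep the last layer as raw logits and let TensorFlow compute a numerically stable cross-entropy:

# Hypothetical variant: output raw logits instead of sigmoid probabilities
def forward_propagation_logits(input_tensor, W1, b1, W2, b2):
    layer1 = tf.nn.tanh(tf.matmul(input_tensor, W1) + b1)
    return tf.matmul(layer1, W2) + b2   # logits, no sigmoid here

# Inside train() the manual cross-entropy would then become:
#   logits = forward_propagation_logits(x, W1, b1, W2, b2)
#   y = tf.nn.sigmoid(logits)           # probabilities for evaluate()
#   loss = tf.reduce_mean(
#       tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=logits))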
def main():
    X, Y = load_planar_dataset()
    train(X.T, Y.T, 4, num_iteration=20000)   # n_h = 4 hidden units

if __name__ == '__main__':
    main()
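A note on the transpose: in the original notebook load_planar_dataset keeps the examples in columns (X of shape (2, 400) and Y of shape (1, 400), if I remember the assignment correctly), while the placeholders above expect one example per row, hence X.T and Y.T. A quick sanity check, under that assumption:

# Assumed shapes (verify against your copy of planar_utils):
X, Y = load_planar_dataset()
print(X.shape, Y.shape)        # expected (2, 400) and (1, 400): examples in columns
print(X.T.shape, Y.T.shape)    # (400, 2) and (400, 1): examples in rows, as train() expects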