Adding a layer
import tensorflow as tf
import numpy as np

def add_layer(inputs, in_size, out_size, activation_function=None):  # add a neural-network layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # in_size rows, out_size columns
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # biases start slightly above zero
    Wx_plus_b = tf.matmul(inputs, Weights) + biases  # predicted value (before activation)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
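As a quick shape check (a made-up toy call, not part of the network built below), pushing a single one-feature sample through a 1-to-10 layer should give a (1, 10) tensor:

toy_input = tf.constant([[0.5]])  # one sample, one feature: shape (1, 1)
toy_layer = add_layer(toy_input, 1, 10, activation_function=tf.nn.relu)
print(toy_layer.get_shape())  # -> (1, 10)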
Building the neural network
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # 300 samples in a (300, 1) column
noise = np.random.normal(0, 0.05, x_data.shape)  # noise
y_data = np.square(x_data) - 0.5 + noise  # target: y = x^2 - 0.5 plus noise
xs = tf.placeholder(tf.float32, [None, 1])  # None: any number of samples, 1 feature each
ys = tf.placeholder(tf.float32, [None, 1])
## input layer: 1 neuron
## hidden layer: 10 neurons
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
## output layer: 1 neuron
prediction = add_layer(l1, 10, 1, activation_function=None)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))  ## sum the squared error of each sample, then average
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # the optimizer reduces the loss with a learning rate of 0.1
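To make the loss concrete, here is a small numpy sketch of the same sum-then-average computation (y_true and y_pred are made-up example arrays; reduction_indices=[1] is the older TensorFlow name for axis=1):

y_true = np.array([[1.0], [2.0]])
y_pred = np.array([[1.5], [1.0]])
per_sample = np.sum(np.square(y_true - y_pred), axis=1)  # squared error summed over the output dimension
mse = np.mean(per_sample)  # average over samples -> 0.625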
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # start running the graph: this initializes the variables
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
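To see what the network has learned, a minimal sketch (assuming matplotlib is installed; it is not used elsewhere in this post) that plots the fitted curve over the noisy training data:

import matplotlib.pyplot as plt

prediction_value = sess.run(prediction, feed_dict={xs: x_data})  # predictions for all 300 samples
plt.scatter(x_data, y_data, s=5, label='training data')  # noisy quadratic data
plt.plot(x_data, prediction_value, 'r-', lw=2, label='network fit')
plt.legend()
plt.show()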