# Imports
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
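
# Note: this listing uses the TensorFlow 1.x graph API (tf.placeholder,
# tf.Session, tf.train.GradientDescentOptimizer). On TensorFlow 2.x you can
# usually run it by replacing the import above with the 1.x compatibility
# module -- a sketch, not part of the original listing:
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()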

# 1. Create the training data and define the forward computation
def create_data():
    train_x = np.linspace(-1, 1, 100)
    # print(*train_x)
    # y = 2x plus Gaussian noise
    train_y = 2 * train_x + np.random.randn(*train_x.shape) * 0.3
    # plt.plot(train_x, train_y, 'ro', label="Original data")
    # plt.legend()
    # plt.show()
    # 2. Placeholders for X and Y
    X = tf.placeholder("float")
    Y = tf.placeholder("float")
    w = tf.Variable(tf.random_normal([1]), name="weight")
    b = tf.Variable(tf.zeros([1]), name="bias")
    # Forward model: z = X * w + b
    z = tf.multiply(X, w) + b
    return train_x, train_y, X, Y, w, b, z

# Backward optimization: define the loss and the optimizer
def reverse_trsf(Y, z):
    # Mean squared error between the labels Y and the prediction z
    cost = tf.reduce_mean(tf.square(Y - z))
    learning_rate = 0.01
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    return cost, optimizer

# Train the model
def start_nerve(train_x, train_y, X, Y, w, b, optimizer, cost):
    # Initialize all variables
    init = tf.global_variables_initializer()
    training_epochs = 20
    display_step = 2
    # Launch the session
    with tf.Session() as sess:
        sess.run(init)
        plotdata = {"batchsize": [], "loss": []}
        for epoch in range(training_epochs):
            # Feed the training data into the model, one sample at a time
            for (x, y) in zip(train_x, train_y):
                sess.run(optimizer, feed_dict={X: x, Y: y})
            # Report the loss every display_step epochs
            if epoch % display_step == 0:
                loss = sess.run(cost, feed_dict={X: train_x, Y: train_y})
                print("Epoch:", epoch + 1, "cost", loss,
                      "w", sess.run(w), "b", sess.run(b))
                if not np.isnan(loss):
                    plotdata["batchsize"].append(epoch)
                    plotdata["loss"].append(loss)
        print("finished")
        print("cost=", sess.run(cost, feed_dict={X: train_x, Y: train_y}),
              "w=", sess.run(w), "b=", sess.run(b))
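
        # Optional visualization (a sketch, not part of the original listing):
        # plot the fitted line against the training data and the loss values
        # collected in plotdata, using the matplotlib import above.
        plt.figure(1)
        plt.plot(train_x, train_y, 'ro', label="Original data")
        plt.plot(train_x, sess.run(w) * train_x + sess.run(b), label="Fitted line")
        plt.legend()
        plt.figure(2)
        plt.plot(plotdata["batchsize"], plotdata["loss"], 'b--')
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.title("Training loss per display step")
        plt.show()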

if __name__ == "__main__":
    train_x, train_y, X, Y, w, b, z = create_data()
    cost, optimizer = reverse_trsf(Y, z)
    start_nerve(train_x, train_y, X, Y, w, b, optimizer, cost)
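    # With train_y = 2 * train_x + 0.3 * noise, w should converge to roughly 2
    # and b to roughly 0 after 20 epochs; exact values vary from run to run.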