'''
Created on 2019-02-25
@author: xiezhipu
'''
import sonnet as snt
import numpy as np
import tensorflow as tf
HiddenSize=[10,10,8,8]
config = tf.ConfigProto()
# Configure GPU memory allocation to grow on demand (important)
config.gpu_options.allow_growth = True
# Limit the fraction of GPU memory this process may use
config.gpu_options.per_process_gpu_memory_fraction = 0.1
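# Synthetic training data: y = 0.5*x1 + 0.3*x2 plus uniform noise in [0, 5)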
X=np.random.rand(500,2)*200
Y=np.matmul(X,np.array([[0.5],[0.3]]))+np.random.rand(500,1)*5
x=tf.placeholder(tf.float32, [None,2])
yTrue=tf.placeholder(tf.float32,[None,1])
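# Assemble the MLP: one snt.Linear + ReLU pair per hidden size, then a final Linear to a single output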
layers=[]
for i in HiddenSize:
    hidden_i=snt.Linear(output_size=i)
    layers.append(hidden_i)
    layers.append(tf.nn.relu)
hidden_to_out=snt.Linear(output_size=1)
layers.append(hidden_to_out)
mlp=snt.Sequential(layers)
train_predictions = mlp(x)
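# Loss: mean relative error |prediction/target - 1|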
lossfun=tf.reduce_mean(tf.abs(tf.subtract(train_predictions/yTrue,1)))
train_step=tf.train.AdamOptimizer(learning_rate=0.15).minimize(lossfun)
with tf.Session(config = config) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    for i in range(int(1e3)):
        _,loss = sess.run([train_step,lossfun],feed_dict={x:X,yTrue:Y})
        print(loss)
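    # A minimal sketch of running the trained MLP on new inputs; X_test below is a
    # hypothetical example and this assumes it stays inside the same session,
    # after the training loop above.
    X_test = np.random.rand(5, 2) * 200
    preds = sess.run(train_predictions, feed_dict={x: X_test})
    print(preds)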