# assign.py
import os
import numpy as np
import tensorflow.compat.v1 as tf
def add_batch_dimension(data, batch=1):
    """Reshape 1-D or 2-D data so its leading axis is the batch dimension.

    Args:
        data: array-like (list, tuple, or ndarray), 1-D or 2-D.
        batch: number of batches; the size of the leading axis must divide
            evenly by this value.

    Returns:
        np.ndarray of shape (batch, n // batch) for 1-D input, or
        (batch, rows // batch, cols) for 2-D input.

    Raises:
        ValueError: if data is not 1-D or 2-D, or its leading axis does not
            divide evenly into `batch` parts (the original code silently
            truncated with int(), then crashed inside reshape).
    """
    arr = np.asarray(data)  # generalizes: accepts plain lists/tuples too
    if arr.ndim == 1:
        if arr.shape[0] % batch:
            raise ValueError(
                "cannot split %d elements into %d batches" % (arr.shape[0], batch)
            )
        return arr.reshape((batch, arr.shape[0] // batch))
    if arr.ndim == 2:
        if arr.shape[0] % batch:
            raise ValueError(
                "cannot split %d rows into %d batches" % (arr.shape[0], batch)
            )
        return arr.reshape((batch, arr.shape[0] // batch, arr.shape[1]))
    # typo fixed: was "dimision error" raised as a bare Exception
    raise ValueError("dimension error: expected 1-D or 2-D data, got %d-D" % arr.ndim)
def print_info(sess, trainable_var, log_v=True):
    """Print the name of every variable in `trainable_var`; when `log_v`
    is true, also evaluate each one through `sess` and print its value.

    Args:
        sess: an active tf.Session (anything with a .run method).
        trainable_var: iterable of variables exposing a .name attribute.
        log_v: if True, print each variable's current value as well.
    """
    for var in trainable_var:
        print(var.name)
        if not log_v:
            continue
        value = sess.run(var)
        print(value)
        # Under TF2 eager execution this would be print(var.numpy()) instead.
class Model():
    """Three-layer fully-connected Q-network built from explicit weight
    variables in TF1 graph mode.

    Attributes:
        input: float32 placeholder of shape (None, input_size).
        Q: linear output tensor of shape (None, output_size) — note there is
           deliberately no activation on the final matmul.
        trainable_var: all trainable variables created under scope `name`,
            used by the __main__ block to copy eval weights into the target
            network variable-by-variable.
    """
    def __init__(self,input_size,output_size,size,name):
        # The placeholder is created OUTSIDE the variable scope; only the
        # weights and biases below are namespaced under `name`.
        self.input= tf.placeholder(dtype=tf.float32, shape=(None, input_size))
        with tf.variable_scope(name_or_scope=name):
            self.W1 = tf.get_variable('W1', shape=[input_size, size], initializer=tf.glorot_uniform_initializer())
            self.W2 = tf.get_variable('W2', shape=[size, size], initializer=tf.glorot_uniform_initializer())
            self.W3 = tf.get_variable('W3', shape=[size, output_size], initializer=tf.glorot_uniform_initializer())
            # NOTE(review): biases use tf.Variable (auto-named Variable,
            # Variable_1) rather than tf.get_variable, and are shape [1]
            # broadcast across all units instead of per-unit [size] —
            # presumably intentional for this demo, but inconsistent with
            # the W* variables; confirm before reuse. Do not reorder these
            # statements: TF auto-naming depends on creation order.
            self.b1 = tf.Variable(tf.zeros([1], dtype=tf.float32))
            self.b2 = tf.Variable(tf.zeros([1], dtype=tf.float32))
            self.L1 = tf.nn.relu(tf.matmul(self.input, self.W1) + self.b1)
            self.L2 = tf.nn.relu(tf.matmul(self.L1, self.W2) + self.b2)
            # Linear output head: no activation, so Q estimates may be negative.
            self.Q= tf.matmul(self.L2, self.W3)
            # Collect everything trainable under this scope so the caller can
            # sync a target network from an eval network.
            self.trainable_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, name)
class Model2():
    """Same three-layer Q-network as `Model`, built with tf.layers.dense
    instead of hand-rolled weight variables.

    Attributes:
        input: float32 placeholder of shape (None, input_size).
        Q: linear output tensor of shape (None, output_size).
        trainable_var: all trainable variables created under scope `name`.
    """
    def __init__(self, input_size, output_size, size, name):
        # Placeholder lives outside the variable scope, matching `Model`.
        self.input = tf.placeholder(dtype=tf.float32, shape=(None, input_size))
        with tf.variable_scope(name_or_scope=name):
            self.fc1 = tf.layers.dense(self.input, size, activation=tf.nn.relu)
            self.fc2 = tf.layers.dense(self.fc1, size, activation=tf.nn.relu)
            # Bug fix: the output layer was relu-activated, which clamps
            # Q-values to be non-negative. `Model` uses a bare matmul
            # (linear) for its output, and Q estimates must be able to go
            # negative, so the head is linear here too.
            self.Q = tf.layers.dense(self.fc2, output_size, activation=None)
            self.trainable_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, name)
if __name__ == '__main__':
    # Demo: build an eval network and a target network, then copy the eval
    # weights into the target (the classic DQN target-network sync).
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    tf.disable_v2_behavior()

    sess = tf.Session()
    model = Model(4, 2, 3, "eval")
    target = Model(4, 2, 3, "target")

    # Fix: tf.initialize_all_variables() has been deprecated since TF 0.12;
    # tf.global_variables_initializer() is the supported equivalent.
    sess.run(tf.global_variables_initializer())

    print_info(sess, model.trainable_var)
    print("\n\n")
    print_info(sess, target.trainable_var)
    print("\n\n")

    # Sync target <- eval. Running all assign ops in one sess.run avoids a
    # session round-trip per variable (the original looped sess.run calls).
    assign_ops = [
        tgt.assign(src)
        for tgt, src in zip(target.trainable_var, model.trainable_var)
    ]
    sess.run(assign_ops)
    print_info(sess, target.trainable_var)
    sess.close()
'''
input_size=4
name='test'
input= tf.placeholder(dtype=tf.float32, shape=(None, input_size))
with tf.variable_scope(name_or_scope=name):
W1 = tf.get_variable('W1', shape=[input_size,3], initializer=tf.glorot_uniform_initializer())
out=tf.matmul(input, W1)
trainable_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, name)
sess=tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
data=np.array([1,1,1,1])
in1=add_batch_dimension(data)
print_info(sess,trainable_var)
print("\n\n")
print(sess.run(out, feed_dict={input:in1}))
sess.close()
'''
# 网友评论  (stray text from the source webpage: "user comments")