《机器学习》-自编码器
# Simple (linear) autoencoder — TensorFlow 1.x / tf.contrib style.
# Learns a 2D coding of 3D inputs; with linear activations and MSE loss this
# behaves like PCA.
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
import numpy as np

n_inputs = 3        # 3D inputs
n_hidden = 2        # 2D codings
n_outputs = n_inputs  # reconstruction has the same dimensionality as the input
learning_rate = 0.01

X = tf.placeholder(tf.float32, shape=[None, n_inputs])
# activation_fn=None keeps both layers linear (PCA-like autoencoder).
hidden = fully_connected(X, n_hidden, activation_fn=None)
outputs = fully_connected(hidden, n_outputs, activation_fn=None)

reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))  # MSE
# BUG FIX: the original passed `reconstruction_loss` as AdamOptimizer's first
# argument, i.e. used the loss tensor as the learning rate. Pass the scalar
# learning rate instead.
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(reconstruction_loss)

init = tf.global_variables_initializer()

# Training on random toy data (100 train / 10 test samples in 3D).
X_train, X_test = np.random.rand(100, 3), np.random.rand(10, 3)
n_iterations = 1000
codings = hidden  # the hidden layer's output is the coding
codings_val = None
with tf.Session() as sess:
    init.run()
    for iteration in range(n_iterations):
        training_op.run(feed_dict={X: X_train})
    # Encode the test set, then decode the codings back to input space.
    codings_val = codings.eval(feed_dict={X: X_test})
    res_val = outputs.eval(feed_dict={hidden: codings_val})
# SAE — stacked autoencoder on MNIST (TensorFlow 1.x / tf.contrib style).
# Three hidden layers (300 -> 150 -> 300) with ELU activations, He init, and
# L2 weight regularization; trained end-to-end to reconstruct the input.
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from tensorflow.examples.tutorials.mnist.input_data import read_data_sets
import numpy as np

mnist = read_data_sets("./", one_hot=True)

n_inputs = 28 * 28  # MNIST image size
n_hidden1 = 300
n_hidden2 = 150     # codings (bottleneck)
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.001

X = tf.placeholder(tf.float32, shape=[None, n_inputs])
# arg_scope applies the same activation / init / regularizer defaults to every
# fully_connected layer; the output layer overrides activation_fn back to None.
with tf.contrib.framework.arg_scope(
        [fully_connected],
        activation_fn=tf.nn.elu,
        weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
        weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg)):
    hidden1 = fully_connected(X, n_hidden1)
    hidden2 = fully_connected(hidden1, n_hidden2)
    hidden3 = fully_connected(hidden2, n_hidden3)
    outputs = fully_connected(hidden3, n_outputs, activation_fn=None)

reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))  # MSE
# The L2 regularizer registers its terms in this collection; add them to the loss.
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([reconstruction_loss] + reg_losses)

optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()

# Training
n_epochs = 5
batch_size = 150
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        # BUG FIX: the original looped `num_examples` times per epoch, i.e.
        # `batch_size` times too many steps. One epoch is num_examples // batch_size
        # mini-batches.
        n_batches = mnist.train.num_examples // batch_size
        for iteration in range(n_batches):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            # Labels are unused: the autoencoder reconstructs its own input.
            sess.run(training_op, feed_dict={X: X_batch})
网友评论