Introduction to Autoencoders
Code with detailed comments
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 31 16:05:38 2017
@author: mml
"""
import numpy as np
# Data preprocessing module
import sklearn.preprocessing as prep
import tensorflow as tf
# Use the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data

# Xavier parameter initialization:
# automatically picks the most suitable initial distribution for a layer
# from its number of input and output nodes
# fan_in: number of input nodes; fan_out: number of output nodes
def xavier_init(fan_in, fan_out, constant=1):
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    # tf.random_uniform draws from a uniform distribution on [low, high)
    return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)
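
# Illustrative only: for the 784 -> 200 layer used below, the bound is
# sqrt(6.0 / (784 + 200)) ~= 0.078, so w1 starts uniform in [-0.078, 0.078].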

class AdditiveGaussianNoiseAutoencoder(object):
    # Constructor: number of input variables, number of hidden nodes,
    # activation function, optimizer, and scale (Gaussian noise coefficient)
    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        # Number of input variables
        self.n_input = n_input
        # Number of hidden nodes
        self.n_hidden = n_hidden
        # Activation function
        self.transfer = transfer_function
        # The noise scale is a placeholder so it can be fed at run time
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        # Initialize the network parameters
        network_weights = self._initialize_weights()
        self.weights = network_weights
        # Placeholder for the input x, with dimension n_input
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        # Feature extraction in the hidden layer:
        # self.scale * tf.random_normal((n_input,)) generates Gaussian noise
        #   (the 1-D noise vector broadcasts across the batch dimension)
        # self.x + ... corrupts the input with that noise
        # tf.matmul(...) multiplies the corrupted input by the weights w1
        # tf.add(..., self.weights['b1']) adds the bias
        # self.transfer() applies the activation function to the result
        # self.scale (the placeholder) is used here so the value fed via
        # feed_dict actually controls the noise level
        self.hidden = self.transfer(tf.add(tf.matmul(
            self.x + self.scale * tf.random_normal((n_input,)),
            self.weights['w1']), self.weights['b1']))
        # After the hidden layer has extracted features, the output layer
        # reconstructs the data: multiply the hidden output by the output
        # weights w2 and add the bias b2
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']),
                                     self.weights['b2'])
        # Loss function of the autoencoder: squared error as the cost,
        # i.e. cost = 0.5 * sum((reconstruction - x)^2)
        # tf.subtract(self.reconstruction, self.x): reconstruction minus input
        # tf.pow(..., 2.0): square of the difference
        # tf.reduce_sum: sum of all the squared errors
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(
            self.reconstruction, self.x), 2.0))
        # Optimization step that minimizes the cost
        self.optimizer = optimizer.minimize(self.cost)
        # Initialize all global variables
        init = tf.global_variables_initializer()
        # Create a Session and run the initializer
        self.sess = tf.Session()
        self.sess.run(init)
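        # Shapes, for reference: x is [None, n_input], hidden is
        # [None, n_hidden], reconstruction is [None, n_input]
        # (784 -> 200 -> 784 with the settings used below)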
    # Parameter initialization
    def _initialize_weights(self):
        # Dictionary holding all parameters
        all_weights = dict()
        # w1 is initialized with xavier_init defined above
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        # Everything else is initialized to zero
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights
    # Train on one batch of data and return the current cost
    def partial_fit(self, X):
        # Run the cost and optimizer nodes of the graph,
        # feeding the input batch X and the noise scale
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
        return cost
    # A function that only computes the cost, without training
    def calc_total_cost(self, X):
        # Run only the self.cost node of the graph
        return self.sess.run(self.cost, feed_dict={self.x: X, self.scale: self.training_scale})
    # Return the hidden-layer output, i.e. the extracted features
    def transform(self, X):
        # Run the hidden node of the graph
        return self.sess.run(self.hidden, feed_dict={self.x: X,
                                                     self.scale: self.training_scale})
    # Reconstruct from a hidden representation alone
    # (the input is the hidden-layer output)
    def generate(self, hidden=None):
        if hidden is None:
            # Sample a random hidden representation of shape (1, n_hidden)
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})
    # Full reconstruction: the transform and generate steps above in one pass
    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X, self.scale: self.training_scale})
    # Get the hidden-layer weights
    def getWeights(self):
        return self.sess.run(self.weights['w1'])
    # Get the hidden-layer biases
    def getBiases(self):
        return self.sess.run(self.weights['b1'])

# Load the MNIST dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Standardize the training and test data:
# standardization transforms the data to zero mean and unit standard deviation.
# The scaler is fitted on the training data only, then applied to both sets,
# so both go through the same transformation.
def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test
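
# Illustrative sanity check, assuming X_train holds the scaled output:
# np.abs(X_train.mean(axis=0)).max() should be close to 0, and
# X_train.std(axis=0) close to 1 for every non-constant pixel column.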

# Fetch a random block of data
def get_random_block_from_data(data, batch_size):
    # Pick a random starting index
    start_index = np.random.randint(0, len(data) - batch_size)
    # Take batch_size consecutive samples starting there
    return data[start_index:(start_index + batch_size)]
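# Note: blocks drawn this way can overlap between calls, so this is
# block-level sampling with replacement; it is cheap, but unlike a
# shuffled epoch it does not guarantee that every sample is visited.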

# Standardize the training and test sets with the function defined above
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
# Total number of training samples
n_samples = int(mnist.train.num_examples)
# Maximum number of training epochs
training_epochs = 20
# Batch size
batch_size = 128
# Print the cost once per epoch
display_step = 1

# Create an AGN autoencoder instance
# 784 inputs (MNIST images are 28*28)
autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784,
                                               n_hidden=200,
                                               transfer_function=tf.nn.softplus,
                                               optimizer=tf.train.AdagradOptimizer(learning_rate=0.001),
                                               scale=0.01)

# The actual training loop
for epoch in range(training_epochs):
    avg_cost = 0.
    # Number of batches per epoch
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        # Fetch a random batch with get_random_block_from_data
        batch_xs = get_random_block_from_data(X_train, batch_size)
        # Train on the batch with partial_fit, which returns the cost
        cost = autoencoder.partial_fit(batch_xs)
        # Accumulate cost * batch_size / n_samples; summed over the epoch
        # this is the average cost per batch
        avg_cost += cost / n_samples * batch_size
    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost)
print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
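
After training, the remaining methods of the class can be exercised as well. A minimal sketch, assuming the trained autoencoder and the scaled X_test from above (illustrative, not part of the original script):

# Extract features, decode them, and reconstruct end to end
features = autoencoder.transform(X_test[:10])   # shape (10, 200)
decoded = autoencoder.generate(features)        # shape (10, 784)
rebuilt = autoencoder.reconstruct(X_test[:10])  # transform + generate in one run
print "Squared error over 10 samples:", 0.5 * ((rebuilt - X_test[:10]) ** 2).sum()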
Results
Running the script prints the average cost for each of the 20 epochs, followed by the total cost on the test set.