GANs_1

Author: yanghedada | Published 2018-09-08 10:36
    import pickle as pkl
    import numpy as np
    import tensorflow as tf
    import matplotlib.pyplot as plt
    
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets(r'E:\python\mnist_data')
    
    
    def model_inputs(real_dim, z_dim):
        inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='inputs_real')  # None leaves the batch size flexible
        inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='inputs_z')
        return inputs_real, inputs_z
    
    def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
        ''' Build the generator network.

            z : Input tensor (latent noise)
            out_dim : Size of the generator output
            n_units : Number of units in the hidden layer
            reuse : Whether to reuse the variables in the 'generator' scope
            alpha : Leak parameter of the leaky ReLU
        '''
        with tf.variable_scope('generator', reuse=reuse):
            # Hidden layer: a fully connected layer
            h1 = tf.layers.dense(z, n_units, activation=None)
            # Leaky ReLU
            h1 = tf.maximum(alpha * h1, h1)

            # Logits and tanh output, in [-1, 1] to match the rescaled images
            logits = tf.layers.dense(h1, out_dim, activation=None)
            out = tf.tanh(logits)

            return out
    
    def discriminator(x, n_units=128, reuse=False, alpha=0.01):
        ''' Build the discriminator network.

            x : Input tensor for the discriminator
            n_units : Number of units in the hidden layer
            reuse : Whether to reuse the variables in the 'discriminator' scope
            alpha : Leak parameter of the leaky ReLU
        '''
        with tf.variable_scope('discriminator', reuse=reuse):
            # Hidden layer (equivalent to tf.matmul(x, w1) + b1)
            h1 = tf.layers.dense(x, n_units, activation=None)
            # Leaky ReLU
            h1 = tf.maximum(alpha * h1, h1)

            # Return both the sigmoid output and the raw logits; the losses
            # below use the logits for numerical stability
            logits = tf.layers.dense(h1, 1, activation=None)
            out = tf.sigmoid(logits)

            return out, logits

    # Size of the flattened input images (MNIST, 28x28 = 784)
    input_size = 784
    # Size of latent vector to generator
    z_size = 100
    # Sizes of hidden layers in generator and discriminator
    g_hidden_size = 128
    d_hidden_size = 128
    # Leak factor for leaky ReLU
    alpha = 0.01
    # Label smoothing
    smooth = 0.1
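    # With smoothing, the discriminator's targets for real images become
    # 1 - smooth = 0.9 instead of 1.0 (see d_loss_real below), which keeps
    # the discriminator from becoming over-confident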
    
    tf.reset_default_graph()  # Clear any previously defined graph before building this one
    # Create our input placeholders
    input_real, input_z = model_inputs(input_size,z_size)
    
    # Generator network: g_model is the generator output (fake images)
    g_model = generator(input_z, input_size, n_units=g_hidden_size)
    
    # Discriminator networks: the second call reuses the same variables on the fake images
    d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size)
    d_model_fake, d_logits_fake = discriminator(g_model, n_units=d_hidden_size, reuse=True)
    
    
    # Calculate losses
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
                                                labels=tf.ones_like(d_logits_real) * (1 - smooth)))

    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                labels=tf.zeros_like(d_logits_fake)))

    d_loss = d_loss_real + d_loss_fake
    
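    # The generator uses the "non-saturating" loss: fake samples are labelled
    # as real (ones), so G maximizes log D(G(z)) rather than minimizing
    # log(1 - D(G(z)))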
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                labels=tf.ones_like(d_logits_fake)))
    
    # Optimizers
    learning_rate = 0.002
    
    # Get the trainable_variables, split into G and D parts
    t_vars = tf.trainable_variables()
    g_vars = [var for var in t_vars if var.name.startswith('generator')]
    d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
    
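    # Each optimizer updates only its own network's weights (via var_list),
    # so the discriminator step leaves the generator untouched and vice versa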
    d_train_opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(d_loss, var_list=d_vars)
    g_train_opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(g_loss, var_list=g_vars)
    
    batch_size = 100
    epochs = 100
    samples = []
    losses = []
    # Build the sampling op and a fixed batch of latent vectors once, outside
    # the training loop, so the graph is not rebuilt (and the global RNG is
    # not reseeded) at every epoch
    sample_op = generator(input_z, input_size, n_units=g_hidden_size, reuse=True)
    sample_z = np.random.uniform(-1, 1, size=(16, z_size))
    # Only save generator variables
    saver = tf.train.Saver(var_list=g_vars)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for ii in range(mnist.train.num_examples//batch_size):
                batch = mnist.train.next_batch(batch_size)
    
                # Get images, reshape and rescale to pass to D
                batch_images = batch[0].reshape((batch_size, 784))
                batch_images = batch_images*2 - 1   # rescale [0, 1] -> [-1, 1] to match the tanh output of G
    
                # Sample random noise for G
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
    
                # Run optimizers
                _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
                _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
    
            # At the end of each epoch, get the losses and print them out
            train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
            train_loss_g = g_loss.eval({input_z: batch_z})
    
            print("Epoch {}/{}...".format(e+1, epochs),
                  "Discriminator Loss: {:.4f}...".format(train_loss_d),
                  "Generator Loss: {:.4f}".format(train_loss_g))
            # Save losses to view after training
            losses.append((train_loss_d, train_loss_g))
    
    
            # Sample from the generator as we're training, for viewing afterwards
            gen_samples = sess.run(sample_op, feed_dict={input_z: sample_z})
            samples.append(gen_samples)
            plt.ion()
            plt.imshow(gen_samples[0].reshape(28, 28), cmap='Greys_r')
            plt.show()
            plt.pause(0.4)

        # Save the trained generator variables (checkpoint name chosen here as an example)
        saver.save(sess, './generator.ckpt')

    # Save the samples collected during training for later viewing
    # (filename chosen here as an example; pkl was imported at the top)
    with open('train_samples.pkl', 'wb') as f:
        pkl.dump(samples, f)
    
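After training, the `losses` list holds one (discriminator loss, generator loss) pair per epoch. A minimal sketch for plotting the two curves, assuming the script above has just been run so `losses` is still in scope:

    import numpy as np
    import matplotlib.pyplot as plt

    losses_arr = np.array(losses)   # shape (epochs, 2): columns are d_loss, g_loss
    plt.figure()
    plt.plot(losses_arr[:, 0], label='Discriminator')
    plt.plot(losses_arr[:, 1], label='Generator')
    plt.title('Training losses')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()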

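Because the saver stores only the generator variables, new digits can be produced later by restoring the checkpoint and running the sampling op again. A minimal sketch, assuming the graph built above is still in memory and that the checkpoint was written to `./generator.ckpt` as in the training loop (both are assumptions of this example, not part of the original post):

    with tf.Session() as sess:
        # Restore only the generator weights saved during training
        saver.restore(sess, './generator.ckpt')
        new_z = np.random.uniform(-1, 1, size=(16, z_size))
        new_samples = sess.run(sample_op, feed_dict={input_z: new_z})
        plt.imshow(new_samples[0].reshape(28, 28), cmap='Greys_r')
        plt.show()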