MyGAN

Author: DeepWeaver | Published 2017-10-10 17:40
    import numpy as np 
    import torch 
    import torch.nn as nn
    import torch.nn.functional as F 
    import torch.optim as optim
    from torch.autograd import Variable
    import matplotlib.pyplot as plt 
    
    data_mean = 4                    # mean of the real (target) Gaussian
    data_standard_deviation = 1.25   # std of the real (target) Gaussian
    g_input_size = 1     # Random noise dimension coming into generator, per output vector
    g_hidden_size = 50   # Generator complexity
    g_output_size = 1    # size of generated output vector
    
    d_input_size = 100   # Minibatch size - cardinality of distributions
    d_hidden_size = 50   # Discriminator complexity
    d_output_size = 1    # Single dimension for 'real' vs. 'fake'
    
    minibatch_size = d_input_size # use batch gradient descent
    
    d_lr = 2e-4
    g_lr = 2e-4
    optim_betas = (0.9, 0.999)
    n_epoches = 10000
    print_interval = 1400
    
    class Generator(nn.Module):
        def __init__(self, input_size, hidden_size, output_size):
            super(Generator, self).__init__()
            self.map1 = nn.Linear(input_size, hidden_size)
            self.map2 = nn.Linear(hidden_size, hidden_size)
            self.map3 = nn.Linear(hidden_size, output_size)
        def forward(self, x):
            x = F.relu(self.map1(x))
            x = F.sigmoid(self.map2(x))
            x = self.map3(x)
            return x
    
    class Discriminator(nn.Module):
        def __init__(self, input_size, hidden_size, output_size):
            super(Discriminator, self).__init__()
            self.map1 = nn.Linear(input_size, hidden_size)
            self.map2 = nn.Linear(hidden_size, hidden_size)
            self.map3 = nn.Linear(hidden_size, output_size)
        def forward(self, x):
            x = F.relu(self.map1(x))
            x = F.relu(self.map2(x))
            x = F.sigmoid(self.map3(x))
            return x
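
    # Note (added remark): G's output layer is linear (unbounded) so it can match
    # an arbitrary real-valued Gaussian, while D ends in a sigmoid because
    # nn.BCELoss expects probabilities in (0, 1).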
    
    
    G = Generator(input_size=g_input_size, hidden_size=g_hidden_size, output_size=g_output_size)
    D = Discriminator(input_size=d_input_size, hidden_size=d_hidden_size, output_size=d_output_size)
    # D: input_size = 100 (one whole minibatch), hidden_size = 50, output_size = 1
    print(G)
    print(D)
    criterion = nn.BCELoss()
    d_optimizer = optim.Adam(D.parameters(), lr = d_lr, betas = optim_betas)
    g_optimizer = optim.Adam(G.parameters(), lr = g_lr, betas = optim_betas)
    
    def stats(d):
        # [mean, std] of a list of samples
        return [np.mean(d), np.std(d)]
    def extract(v):
        # flatten a Variable's data into a plain Python list
        return v.data.storage().tolist()
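
    # Quick shape sanity check (an illustrative addition, not in the original):
    # D scores an entire 100-sample minibatch as one 100-dimensional input
    # vector, so G's (100, 1) output must be transposed before entering D.
    z = Variable(torch.rand(d_input_size, g_input_size))  # (100, 1) noise
    print(G(z).size())         # torch.Size([100, 1]) fake samples
    print(D(G(z).t()).size())  # torch.Size([1, 1])   probability of "real"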
    
    for epoch in range(n_epoches):
        d_real_input = torch.Tensor(np.random.normal(data_mean, data_standard_deviation, (1, d_input_size)))  # (1, 100) real samples
        g_input = torch.rand(d_input_size, g_input_size)  # (100, 1) uniform noise
    
        # Train D on real data: target label 1 ("real")
        D.zero_grad()
        d_real_output = D(Variable(d_real_input))
        d_real_error = criterion(d_real_output, Variable(torch.ones(1)))
        d_real_error.backward()
    
        # Train D on fake data: target label 0 ("fake").
        # detach() keeps gradients from flowing back into G during D's update.
        d_fake_input = G(Variable(g_input)).detach()  # (100, 1) generated samples
        d_fake_output = D(d_fake_input.t())           # (100, 1) -> (1, 100) -> (1, 1)
        d_fake_error = criterion(d_fake_output, Variable(torch.zeros(1)))
        d_fake_error.backward()
        d_optimizer.step()
        # Only optimizes D's parameters; changes based on stored gradients from backward()
        # Train G: push D to classify the generated samples as real (target label 1)
    
        G.zero_grad()
        g_output = G(Variable(g_input))  # same noise, but without detach so gradients reach G
        g_error = criterion(D(g_output.t()), Variable(torch.ones(1)))
        g_error.backward()
        g_optimizer.step()
    
        if epoch % print_interval == 0:
            print(epoch)
            print('real [mean, std]: %s' % stats(extract(Variable(d_real_input))))
            print('fake [mean, std]: %s' % stats(extract(d_fake_input)))
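
matplotlib is imported at the top but never used; a minimal sketch of how one might visualize the result after training (it only reuses names already defined above):

    # Compare the real and the learned distributions with histograms.
    real_samples = np.random.normal(data_mean, data_standard_deviation, 1000)
    fake_samples = extract(G(Variable(torch.rand(1000, g_input_size))))
    plt.hist(real_samples, bins=50, alpha=0.5, label='real N(4, 1.25)')
    plt.hist(fake_samples, bins=50, alpha=0.5, label='generated')
    plt.legend()
    plt.show()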
    
    
    
