NN

作者: DeepWeaver | 来源:发表于2017-12-06 09:49 被阅读20次
# feed forward nn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
import argparse

def _str2bool(value):
    """Parse a command-line boolean flag value.

    The original script left `-train`/`-test`/`-cuda` untyped, so any value
    given on the command line arrived as a non-empty *string* and the later
    `args.train == True` check could never be satisfied from the CLI.
    """
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', 't', '1', 'yes', 'y')


parser = argparse.ArgumentParser(description='feedforward_mnist')
# learning & saving parameters
parser.add_argument('-train', type=_str2bool, default=False, help='train the model')
parser.add_argument('-test', type=_str2bool, default=True, help='test the model')
parser.add_argument('-learning_rate', type=float, default=0.005, help='initial learning rate [default: 0.005]')
parser.add_argument('-num_epochs', type=int, default=5, help='number of epochs of training [default: 5]')
parser.add_argument('-batch_size', type=int, default=100, help='batch size for training')
parser.add_argument('-input_size', type=int, default=784, help='input size (28*28 flattened MNIST image)')
parser.add_argument('-hidden_size', type=int, default=500, help='hidden layer size')
parser.add_argument('-output_size', type=int, default=1, help='output size (currently unused)')
parser.add_argument('-num_classes', type=int, default=10, help='number of output classes')
parser.add_argument('-cuda', type=_str2bool, default=False, help='enable gpu')
args = parser.parse_args()

# Hyper parameters, mirrored into module-level names for readability below.
input_size = args.input_size
hidden_size = args.hidden_size
num_classes = args.num_classes
num_epochs = args.num_epochs
batch_size = args.batch_size
learning_rate = args.learning_rate


class Net(nn.Module):
    """Two-layer fully connected network: Linear -> LeakyReLU -> Linear.

    Emits raw logits (no softmax) so it pairs directly with
    nn.CrossEntropyLoss in the training loop.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        """Build the two affine layers.

        input_size: flattened input dimension (784 for 28x28 MNIST)
        hidden_size: width of the single hidden layer
        num_classes: number of output logits
        """
        super(Net, self).__init__()
        self.layer1 = nn.Linear(input_size, hidden_size)
        self.layer2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Hidden layer with leaky-ReLU non-linearity, then linear read-out.
        x = F.leaky_relu(self.layer1(x))
        return self.layer2(x)
net = Net(input_size, hidden_size, num_classes)


if args.train == True:
    # MNIST training split; downloaded into ./data on first run.
    train_dataset = dsets.MNIST(root='./data',
                                train=args.train,
                                transform=transforms.ToTensor(),
                                download=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)
    # Cross-entropy on raw logits, optimised with Adam.
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

    # Train the model.
    steps_per_epoch = len(train_dataset) // batch_size
    for epoch in range(num_epochs):
        for step, (batch_images, batch_labels) in enumerate(train_loader):
            # Flatten [100, 1, 28, 28] image batches into [100, 784] rows
            # and wrap in Variables (pre-0.4 PyTorch autograd API).
            inputs = Variable(batch_images.view(-1, 28 * 28))
            targets = Variable(batch_labels)

            # Forward + backward + optimise.
            optimizer.zero_grad()  # zero the gradient buffer
            loss = criterion(net(inputs), targets)
            loss.backward()
            optimizer.step()

            if (step + 1) % 100 == 0:
                print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f' 
                       %(epoch+1, num_epochs, step+1, steps_per_epoch, loss.data[0]))
    # Persist only the parameters (state_dict), not the whole module.
    torch.save(net.state_dict(), 'model.pkl')

if args.test == True:
    # Evaluate on the 10,000-image MNIST test split.
    # BUG FIX: the original passed train=args.train here, so running with
    # -train enabled would have "tested" on the training set. The test
    # branch must always load the held-out split.
    test_dataset = dsets.MNIST(root='./data',
                               train=False,
                               transform=transforms.ToTensor())
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              shuffle=False)
    # Restore the parameters saved by the training branch.
    net.load_state_dict(torch.load('model.pkl'))
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = Variable(images.view(-1, 28 * 28))
        outputs = net(images)
        # Predicted class = index of the max logit in each row.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum()

    print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))

相关文章

  • Pytorch CNN搭建(NLP)

    nn.Conv1d & nn.Conv2d参考:nn.Conv1d和nn.Conv2d理解 nn.Conv1d:主...

  • Pytorch 神经网络 3

    神经网络(以下简称NN)主要用的包是torch.nn。nn基于autograd,定义模型并微分。一个nn.Modu...

  • Pytorch的第二步:(1) torch.nn.functional

    torch.nn.functional 涉及了所有 torch.nn 需要 类 和 方法 ,torch.nn 构建...

  • PyTorch 中torch.nn 与 torch.nn.functional

    两者的相同之处: nn.Xxx和nn.functional.xxx的实际功能是相同的,即nn.Conv2d和nn....

  • 微软AutoML-NNI自动化调参踩坑笔记(持续更新)

    module 'nni.retiarii.nn.pytorch.nn' has no attribute 'Har...

  • crf

    import torch import torch.nn as nn import torch.optim as ...

  • TF笔记 - 损失函数

    NN复杂度: 多用NN层数和NN参数的个数表示层数 = 隐藏层的层数+1个输出层总参数=总W+总b NN优化损失函...

  • nn

    版本-(4)-js 公告-(更多福利软件请关注公众号[安卓好软件分享吧],se.dog每个ip仅提供二十免费访问数...

  • NN

    昨天晚上,好像梦见你了:细节不记得,短暂的十几分钟,那种感觉却特别强烈冲击着情绪,我想你,也只能想你。

  • NN

网友评论

      本文标题:NN

      本文链接:https://www.haomeiwen.com/subject/qzhkixtx.html