
pytorch--1 Data Loading

Author: yanghedada | Published on 2019-03-01 10:16

    Building a Dataset and a DataLoader

    
    import torch
    from torch import nn, optim
    from torch.autograd import Variable
    from torch.utils.data import DataLoader, Dataset
    from torchvision import datasets, transforms
    
    
    class myDataset(Dataset):
        # torch.utils.data.Dataset is the abstract class that represents a dataset.
        # Define your own dataset by subclassing it; all you need to implement
        # are the __len__ and __getitem__ methods.
        def __init__(self):
            self.x_test = [0, 1, 2, 3]
            self.y_test = [0, 1, 2, 3]
    
        def __len__(self):
            return len(self.x_test)
    
        def __getitem__(self, item):
            data = (self.x_test[item], self.y_test[item])
            return data
    #
    data = myDataset()
    my_loader = DataLoader(data, batch_size=2, shuffle=True, drop_last=True)
    for d in data:
        print(d)
    
    print('data[3]: ', data[3])
    dataiter = iter(my_loader)
    print('number of batches:', len(my_loader))
    for i in dataiter:
        print(i)
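
    For data that already lives in tensors, the built-in torch.utils.data.TensorDataset gives the same behaviour without writing a custom class. A minimal sketch, assuming the same four toy (x, y) pairs (the tensors below are illustrative placeholders):

    import torch
    from torch.utils.data import TensorDataset, DataLoader

    # the same four (x, y) pairs as above, stored as tensors
    x = torch.arange(4)
    y = torch.arange(4)

    # TensorDataset indexes all of its tensors along the first dimension,
    # so __len__ and __getitem__ come for free
    tensor_data = TensorDataset(x, y)
    tensor_loader = DataLoader(tensor_data, batch_size=2, shuffle=True, drop_last=True)

    for batch_x, batch_y in tensor_loader:
        print(batch_x, batch_y)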
    

    Building the network

    class Net(nn.Module):
        def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
            super(Net, self).__init__()
            self.layer1 = nn.Sequential(
                nn.Linear(in_dim, n_hidden_1),
                nn.BatchNorm1d(n_hidden_1),
                nn.ReLU(True)
            )
            self.layer2 = nn.Sequential(
                nn.Linear(n_hidden_1, n_hidden_2),
                nn.BatchNorm1d(n_hidden_2),
                nn.ReLU(True)
            )
            self.layer3 = nn.Sequential(
                nn.Linear(n_hidden_2, out_dim))
    
        def forward(self, x):
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            return x
    
    batch_size = 64
    learning_rate = 1e-2
    num_epoches = 1
    
    
    data_tf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),])
    
    train_dataset = datasets.MNIST(root='./data',
                                    train=True,
                                    transform=data_tf,
                                    download=True)
    
    test_dataset = datasets.MNIST(root='./data',
                                    train=False,
                                    transform=data_tf,
                                    download=True)
    
    train_loader = DataLoader(train_dataset, batch_size=batch_size,shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size,shuffle=True)
    
    
    model = Net(28*28, 300, 100, 10)
    if torch.cuda.is_available():
        model = model.cuda()
    model.train()
    
    
    
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)
    
    #-----------------------------train------------------------------------------------
    for epoch in range(num_epoches):
        for data in train_loader:
            x_train, y_train = data
            x_train = x_train.view(x_train.size(0), -1)
            if torch.cuda.is_available():
                inputs = Variable(x_train).cuda()
                target = Variable(y_train).cuda()
            else:
                inputs = Variable(x_train)
                target = Variable(y_train)
    
            # forward
            out = model(inputs)
            loss = criterion(out, target)
    
            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    
            # report the running loss (printed after every batch)
            print('Epoch : [{}/{}], loss: {:.6f}'.format(epoch + 1,
                                                         num_epoches,
                                                         loss.item()))
    
    #-----------------------------eval------------------------------------------------
    model.eval()
    eval_loss = 0
    eval_acc = 0
    
    for data in test_loader:
        x_test, y_test = data
        x_test = x_test.view(x_test.size(0), -1)
    
        # disable gradient tracking during evaluation
        # (Variable's `volatile` flag was removed in PyTorch 0.4)
        if torch.cuda.is_available():
            inputs = x_test.cuda()
            target = y_test.cuda()
        else:
            inputs = x_test
            target = y_test

        with torch.no_grad():
            out = model(inputs)
            loss = criterion(out, target)
        eval_loss += loss.item() * target.size(0)
        _, pred = torch.max(out, 1)
        num_correct = (pred == target).sum()
        eval_acc += num_correct.item()
    
    print('Test Loss : {:.6f}, Acc: {:.6f}'.format(
        eval_loss / (len(test_dataset)),
        eval_acc / (len(test_dataset))
    ))
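
    Once training and evaluation are done, the learned weights can be persisted via the model's state_dict. A minimal sketch, assuming an arbitrary checkpoint path 'mnist_net.pth':

    # save only the parameters, not the whole pickled module
    torch.save(model.state_dict(), 'mnist_net.pth')

    # to restore, rebuild the same architecture and load the saved parameters
    restored = Net(28 * 28, 300, 100, 10)
    restored.load_state_dict(torch.load('mnist_net.pth'))
    restored.eval()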
    

