PyTorch Deep Learning in Practice 12 - Saving and Loading Models

Author: 薛东弗斯 | Published 2023-03-25 11:07

Reference: 学习笔记14:模型保存 - pbc的成长之路 - 博客园 (cnblogs.com)
A simple model

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import torchvision
import os
import copy
from torchvision import transforms

base_dir = r'./data/4_weather'
train_dir = os.path.join(base_dir,'train')
test_dir = os.path.join(base_dir,'test')

transform = transforms.Compose([
                  transforms.Resize((96, 96)),
                  transforms.ToTensor(),
                  transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                       std=[0.5, 0.5, 0.5])
])

train_ds = torchvision.datasets.ImageFolder(
               train_dir,
               transform=transform
)

test_ds = torchvision.datasets.ImageFolder(
               test_dir,
               transform=transform
)

BATCHSIZE = 16

train_dl = torch.utils.data.DataLoader(
                                       train_ds,
                                       batch_size=BATCHSIZE,
                                       shuffle=True
)

test_dl = torch.utils.data.DataLoader(
                                       test_ds,
                                       batch_size=BATCHSIZE,
)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.conv3 = nn.Conv2d(32, 64, 3)
        # with 96x96 inputs, three conv+pool stages leave 64 feature maps of size 10x10
        self.fc1 = nn.Linear(64*10*10, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, 4)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(-1, 64 * 10 * 10)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
    
model = Net()
if torch.cuda.is_available():
    model.to('cuda')
    
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()

def fit(epoch, model, trainloader, testloader):
    correct = 0
    total = 0
    running_loss = 0
    model.train()
    for x, y in trainloader:
        if torch.cuda.is_available():
            x, y = x.to('cuda'), y.to('cuda')
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            y_pred = torch.argmax(y_pred, dim=1)
            correct += (y_pred == y).sum().item()
            total += y.size(0)
            running_loss += loss.item()
    epoch_loss = running_loss / len(trainloader.dataset)
    epoch_acc = correct / total
        
        
    test_correct = 0
    test_total = 0
    test_running_loss = 0 
    
    model.eval()
    with torch.no_grad():
        for x, y in testloader:
            if torch.cuda.is_available():
                x, y = x.to('cuda'), y.to('cuda')
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            y_pred = torch.argmax(y_pred, dim=1)
            test_correct += (y_pred == y).sum().item()
            test_total += y.size(0)
            test_running_loss += loss.item()
    
    epoch_test_loss = test_running_loss / len(testloader.dataset)
    epoch_test_acc = test_correct / test_total
    
        
    print('epoch: ', epoch, 
          'loss: ', round(epoch_loss, 3),
          'accuracy:', round(epoch_acc, 3),
          'test_loss: ', round(epoch_test_loss, 3),
          'test_accuracy:', round(epoch_test_acc, 3)
             )
        
    return epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc

epochs = 10

train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc = fit(epoch,
                                                                 model,
                                                                 train_dl,
                                                                 test_dl)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_test_loss)
    test_acc.append(epoch_test_acc)
epoch:  0 loss:  0.047 accuracy: 0.653 test_loss:  0.037 test_accuracy: 0.804
epoch:  1 loss:  0.028 accuracy: 0.839 test_loss:  0.038 test_accuracy: 0.844
epoch:  2 loss:  0.023 accuracy: 0.856 test_loss:  0.034 test_accuracy: 0.836
epoch:  3 loss:  0.022 accuracy: 0.871 test_loss:  0.034 test_accuracy: 0.796
epoch:  4 loss:  0.018 accuracy: 0.893 test_loss:  0.032 test_accuracy: 0.853
epoch:  5 loss:  0.018 accuracy: 0.9 test_loss:  0.033 test_accuracy: 0.862
epoch:  6 loss:  0.015 accuracy: 0.906 test_loss:  0.034 test_accuracy: 0.88
epoch:  7 loss:  0.012 accuracy: 0.926 test_loss:  0.038 test_accuracy: 0.849
epoch:  8 loss:  0.015 accuracy: 0.918 test_loss:  0.034 test_accuracy: 0.893
epoch:  9 loss:  0.008 accuracy: 0.946 test_loss:  0.043 test_accuracy: 0.867
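
Since matplotlib is already imported, we can plot the recorded curves to see how training and test accuracy evolve over the epochs; a minimal sketch using the lists filled in the loop above:

# Plot training vs. test accuracy per epoch
plt.plot(range(1, epochs + 1), train_acc, label='train_acc')
plt.plot(range(1, epochs + 1), test_acc, label='test_acc')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()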

Saving the model

A state_dict is simply a Python dictionary that maps each layer of the model to its parameter tensors. It contains the learnable parameters (weights and biases) as well as registered buffers such as BatchNorm's running_mean; optimizers (torch.optim) keep a state_dict of their own that stores the optimizer's state and hyperparameters. Because it is an ordinary dictionary, a state_dict is easy to save, update, modify, and restore.
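
To get a feel for what it contains, you can print the entries of the model's and optimizer's state_dict; a small sketch using the model and optimizer defined above:

# Inspect the model's state_dict: one entry per parameter/buffer tensor
for name, tensor in model.state_dict().items():
    print(name, '\t', tensor.size())

# The optimizer keeps its own state_dict (state and hyperparameters)
print(optimizer.state_dict().keys())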

PATH = './my_net.pth'
torch.save(model.state_dict(), PATH)
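
If you also want to resume training later, it is common to save a checkpoint dictionary that bundles the model's state_dict with the optimizer's state_dict and the epoch count. A sketch of that pattern; the checkpoint path and key names here are illustrative choices, not part of the original tutorial:

CKPT_PATH = './my_checkpoint.pth'   # hypothetical file name
torch.save({
    'epoch': epochs,                              # number of epochs trained so far
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
}, CKPT_PATH)

# Later: restore both model and optimizer before continuing training
checkpoint = torch.load(CKPT_PATH)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])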

Restoring the model

new_model = Net()
new_model.load_state_dict(torch.load(PATH))

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
new_model.to(device)

Net(
  (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1))
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))
  (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))
  (fc1): Linear(in_features=6400, out_features=1024, bias=True)
  (fc2): Linear(in_features=1024, out_features=256, bias=True)
  (fc3): Linear(in_features=256, out_features=4, bias=True)
)
test_correct = 0
test_total = 0
new_model.eval()
with torch.no_grad():
    for x, y in test_dl:
        if torch.cuda.is_available():
            x, y = x.to('cuda'), y.to('cuda')
        y_pred = new_model(x)
        y_pred = torch.argmax(y_pred, dim=1)
        test_correct += (y_pred == y).sum().item()
        test_total += y.size(0)
    
epoch_test_acc = test_correct / test_total
print(epoch_test_acc)
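
The restored model can also classify a single image. A minimal sketch, assuming some image file from one of the four weather classes (the path below is only a hypothetical example):

from PIL import Image

img_path = './data/4_weather/test/cloudy/cloudy1.jpg'   # hypothetical example file
img = Image.open(img_path).convert('RGB')
x = transform(img).unsqueeze(0).to(device)              # same preprocessing as the datasets

new_model.eval()
with torch.no_grad():
    pred = torch.argmax(new_model(x), dim=1).item()
print(train_ds.classes[pred])                            # map the index back to a class name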

Saving the best parameters during training

model = Net()
if torch.cuda.is_available():
    model.to('cuda')
    
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0

train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc = fit(epoch,
                                                                 model,
                                                                 train_dl,
                                                                 test_dl)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_test_loss)
    test_acc.append(epoch_test_acc)
    
    
    if epoch_test_acc > best_acc:
        best_acc = epoch_test_acc
        best_model_wts = copy.deepcopy(model.state_dict())        

model.load_state_dict(best_model_wts)
model.eval()
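
The loop above only keeps the best weights in memory. To persist them, the saved copy can be written to disk like any other state_dict; a small sketch, with an illustrative file name:

BEST_PATH = './my_best_net.pth'          # hypothetical file name
torch.save(best_model_wts, BEST_PATH)

# Reload them later into a fresh instance of the network
best_model = Net()
best_model.load_state_dict(torch.load(BEST_PATH))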

Saving and loading the complete model

PATH = './my_whole_model.pth'
torch.save(model, PATH)
new_model2 = torch.load(PATH)
new_model2.eval()
#Net(
#  (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1))
#  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
#  (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))
#  (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))
#  (fc1): Linear(in_features=6400, out_features=1024, bias=True)
#  (fc2): Linear(in_features=1024, out_features=256, bias=True)
#  (fc3): Linear(in_features=256, out_features=4, bias=True)
#)
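
Note that saving the whole model pickles the Net class by reference, so the class definition must still be importable when the file is loaded. Also, recent PyTorch releases give torch.load a weights_only argument; if your installed version defaults it to True, un-pickling a full model object may require passing it explicitly (check the documentation for your version):

# Depending on your PyTorch version, loading a pickled model may need:
new_model2 = torch.load(PATH, weights_only=False)
new_model2.eval()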

Saving and loading models across devices

Save on GPU, load on CPU

PATH = './my_gpu_model_wts'
torch.save(model.state_dict(), PATH)
device = torch.device('cpu')
model = Net()
model.load_state_dict(torch.load(PATH, map_location=device))

Save on GPU, load on GPU

PATH = './my_gpu_model2_wts'
torch.save(model.state_dict(), PATH)
device = torch.device("cuda")
model = Net()
model.load_state_dict(torch.load(PATH))
model.to(device)

Save on CPU, load on GPU

PATH = 'my_cpu_wts.pth'
torch.save(model.state_dict(), PATH)
device = torch.device("cuda")
model = Net()
model.load_state_dict(torch.load(PATH, map_location="cuda:0"))  # Choose whatever GPU device number you want
model.to(device)
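
A convenient device-agnostic pattern is to pick the device at runtime and pass it as map_location, so the same loading code works on machines with or without a GPU; a minimal sketch reusing PATH from above:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net()
model.load_state_dict(torch.load(PATH, map_location=device))  # remap tensors to the chosen device
model.to(device)
model.eval()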
