PyTorch Deep Learning in Practice 5 - Multilayer Perceptron - MNIST

Author: 薛东弗斯 | Published 2023-03-11 11:03

Study Notes 8: MNIST classification with a fully connected network (torch built-in dataset) - pbc的成长之路 - cnblogs.com
Study Notes 9: MNIST classification with a convolutional neural network (GPU accelerated) - pbc的成长之路 - cnblogs.com

http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz

torchvision.datasets — Torchvision master documentation (gitee.io)
torchvision · PyPI

torch.utils.data.DataLoader wraps a dataset and mainly does four things:
1. Shuffling (shuffle).
2. Sampling the data into mini-batches (batch_size sets the batch size).
   Feeding single samples into the model makes the loss and gradient depend entirely on one sample; with unevenly distributed or even mislabeled samples, the gradients oscillate wildly, so training is strongly affected by individual images.
   Why not use the whole dataset at once? ImageNet, for example, is roughly 130 GB. Mini-batches are the compromise: iterating batch by batch keeps training stable and saves compute. The DataLoader draws one batch at a time from the dataset and hands it to the model; the model computes the loss over the whole batch and performs gradient descent on that loss.
3. Using multiple worker processes to speed up data loading (num_workers).
4. Setting a batch-collation function (collate_fn) that applies custom per-batch processing; this is commonly needed in text classification (see the sketch below).
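For illustration, here is a minimal sketch of a custom collate_fn that pads variable-length text sequences within a batch. The dataset text_ds, the (sequence, label) sample format, and the padding value are assumptions made for this sketch, not part of the original tutorial.

import torch
from torch.nn.utils.rnn import pad_sequence

def pad_collate(batch):
    # batch is a list of (sequence_tensor, label) pairs of different lengths (assumed sample format)
    sequences, labels = zip(*batch)
    padded = pad_sequence(sequences, batch_first=True, padding_value=0)  # pad each sequence to the longest one in the batch
    return padded, torch.tensor(labels)

# text_dl = torch.utils.data.DataLoader(text_ds, batch_size=32, collate_fn=pad_collate)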
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import torchvision
from torchvision.transforms import ToTensor

# Download the dataset locally
train_ds = torchvision.datasets.MNIST('data/',
                                      train=True,
                                      transform=ToTensor(),
                                      download=True)

test_ds = torchvision.datasets.MNIST('data/',
                                     train=False,
                                     transform=ToTensor(),
                                     download=True)

train_dl = torch.utils.data.DataLoader(train_ds, batch_size=64, shuffle=True)
test_dl = torch.utils.data.DataLoader(test_ds, batch_size=256)

imgs, labels = next(iter(train_dl))
# imgs.shape 
# torch.Size([64, 1, 28, 28])
# Dim 1 is the number of images (batch), dim 2 the channel, dim 3 the height, dim 4 the width.
# MNIST images are grayscale, so the channel is 1.
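As a quick check (not in the original post), ToTensor() has also converted the pixels to float32 values in the range [0, 1]:

print(imgs.dtype)                             # torch.float32
print(imgs.min().item(), imgs.max().item())   # pixel values lie in [0.0, 1.0]
print(labels.shape)                           # torch.Size([64]), one integer label per image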

plt.figure(figsize=(10, 1))         # figure size: 10 images, so width 10 and height 1
for i, img in enumerate(imgs[:10]): # plot the first 10 images, each of shape (1, 28, 28)
    npimg = img.numpy()             # convert to an ndarray
    npimg = np.squeeze(npimg)       # drop the channel dimension of size 1; matplotlib cannot plot a (1, H, W) image directly
    plt.subplot(1, 10, i+1)         # subplots: 1 row, 10 columns
    plt.imshow(npimg)
    plt.axis('off')
plt.savefig('3.1.jpg', dpi=400)
print(labels[:10])
# Build the model
class Model(nn.Module):                         # every PyTorch model subclasses nn.Module
    def __init__(self):        # define the three layers
        super().__init__()     # initialise the parent class
        # nn.Linear is a fully connected layer; it expects flattened input of shape (batch, features)
        self.liner_1 = nn.Linear(28*28, 120)    # first layer: input 28*28 (the flattened 1*28*28 image), output 120 units
        self.liner_2 = nn.Linear(120, 84)       # second layer: input is the previous layer's 120 outputs, output 84 units; 120 and 84 are hand-picked hyperparameters
        self.liner_3 = nn.Linear(84, 10)        # third layer: output 10 classes; softmax maps the C outputs to probabilities over the C classes
    def forward(self, input):  # the computation is implemented in forward
        x = input.view(-1, 28*28)       # flatten the input to (-1, 28*28); the first dimension (-1) is the batch
        x = F.relu(self.liner_1(x))     # every hidden layer is followed by an activation
        x = F.relu(self.liner_2(x))
        x = self.liner_3(x)             # these unactivated outputs are the logits; softmax would turn them into probabilities, but argmax on the logits already picks the most likely class
        return x
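To make the logits comment concrete, here is a small illustrative snippet (not part of the original code): softmax only rescales the logits into probabilities and does not change which class argmax selects.

logits = torch.tensor([[5.0, 10.0, 40.0]])   # raw, unactivated outputs for one sample
probs = F.softmax(logits, dim=1)             # normalised probabilities that sum to 1
print(probs.sum())                           # tensor(1.)
print(logits.argmax(1), probs.argmax(1))     # both give tensor([2]): the same predicted class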

device = "cuda" if torch.cuda.is_available() else "cpu"
# print("Using {} device".format(device))
model = Model().to(device)
# Model(
#  (liner_1): Linear(in_features=784, out_features=120, bias=True)
#  (liner_2): Linear(in_features=120, out_features=84, bias=True)
#  (liner_3): Linear(in_features=84, out_features=10, bias=True)
#)

loss_fn = torch.nn.CrossEntropyLoss()  # loss function: expects the unactivated logits as input, and the targets as class indices rather than one-hot vectors
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)  # optimisation: adjust the model parameters according to the computed loss so that the loss decreases
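A tiny illustrative example (not from the original post) of how CrossEntropyLoss consumes raw logits and integer class indices:

example_logits = torch.tensor([[2.0, 0.5, 0.1],
                               [0.2, 3.0, 0.3]])   # two samples, three classes, unactivated scores
example_targets = torch.tensor([0, 1])             # class indices rather than one-hot vectors
print(loss_fn(example_logits, example_targets))    # mean cross-entropy over the two samples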

# Training function
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)    # total number of samples; dataloader.dataset is the underlying dataset, while len(dataloader) is the number of batches
    train_loss, correct = 0, 0        # train_loss accumulates the loss over all batches; correct counts correctly predicted samples
    for X, y in dataloader:           # X is the input, y is the target label
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)   # mean loss over the samples in this batch

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()   # argmax(0) would be over the batch dimension; argmax(1) gives the predicted class for each sample
            train_loss += loss.item()
    train_loss /= size       # note: loss.item() is already a per-batch mean, so this divides the sum of batch means by the sample count (hence the small values in the output below)
    correct /= size          # accuracy
    return train_loss, correct
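For reference (not in the original post), the two lengths mentioned in the comments above are different things: len(dataloader.dataset) counts samples, while len(dataloader) counts batches.

print(len(train_dl.dataset), len(train_dl))   # 60000 samples, 938 batches of size 64
print(len(test_dl.dataset), len(test_dl))     # 10000 samples, 40 batches of size 256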

# Test function
def test(dataloader, model):
    size = len(dataloader.dataset)
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= size
    correct /= size

    return test_loss, correct

epochs = 50

train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    epoch_loss, epoch_acc = train(train_dl, model, loss_fn, optimizer)
    epoch_test_loss, epoch_test_acc = test(test_dl, model)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_test_loss)
    test_acc.append(epoch_test_acc)
    
    template = ("epoch:{:2d}, train_loss: {:.5f}, train_acc: {:.1f}% ," 
                "test_loss: {:.5f}, test_acc: {:.1f}%")
    print(template.format(
          epoch, epoch_loss, epoch_acc*100, epoch_test_loss, epoch_test_acc*100))
    
print("Done!")

plt.plot(range(1, epochs+1), train_loss, label='train_loss', lw=2)
plt.plot(range(1, epochs+1), test_loss, label='test_loss', lw=2, ls="--")
plt.xlabel('epoch')
plt.legend()
plt.savefig('2-4-3.jpg', dpi=400)

plt.plot(range(1, epochs+1), train_acc, label='train_acc', lw=2)
plt.plot(range(1, epochs+1), test_acc, label='test_acc', lw=2, ls="--")
plt.xlabel('epoch')
plt.legend()
plt.savefig('2-4-4.jpg', dpi=400)
import numpy as np
np.argmax([0.9, 0.04, 0.06])   # returns the index of the largest value
# 0

Full code

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import torchvision
from torchvision.transforms import ToTensor

# Download the dataset locally
train_ds = torchvision.datasets.MNIST('data/',
                                      train=True,
                                      transform=ToTensor(),
                                      download=True)

test_ds = torchvision.datasets.MNIST('data/',
                                     train=False,
                                     transform=ToTensor(),
                                     download=True)

train_dl = torch.utils.data.DataLoader(train_ds, batch_size=64, shuffle=True)
test_dl = torch.utils.data.DataLoader(test_ds, batch_size=256)

imgs, labels = next(iter(train_dl))
# imgs.shape 
# torch.Size([64, 1, 28, 28])
# Dim 1 is the number of images (batch), dim 2 the channel, dim 3 the height, dim 4 the width.
# MNIST images are grayscale, so the channel is 1.

# Build the model
class Model(nn.Module):                         # every PyTorch model subclasses nn.Module
    def __init__(self):        # define the three layers
        super().__init__()     # initialise the parent class
        # nn.Linear is a fully connected layer; it expects flattened input of shape (batch, features)
        self.liner_1 = nn.Linear(28*28, 120)    # first layer: input 28*28 (the flattened 1*28*28 image), output 120 units
        self.liner_2 = nn.Linear(120, 84)       # second layer: input is the previous layer's 120 outputs, output 84 units; 120 and 84 are hand-picked hyperparameters
        self.liner_3 = nn.Linear(84, 10)        # third layer: output 10 classes; softmax maps the C outputs to probabilities over the C classes
    def forward(self, input):  # the computation is implemented in forward
        x = input.view(-1, 28*28)       # flatten the input to (-1, 28*28); the first dimension (-1) is the batch
        x = F.relu(self.liner_1(x))     # every hidden layer is followed by an activation
        x = F.relu(self.liner_2(x))
        x = self.liner_3(x)             # these unactivated outputs are the logits; softmax would turn them into probabilities, but argmax on the logits already picks the most likely class
        return x

device = "cuda" if torch.cuda.is_available() else "cpu"
# print("Using {} device".format(device))
model = Model().to(device)
# Model(
#  (liner_1): Linear(in_features=784, out_features=120, bias=True)
#  (liner_2): Linear(in_features=120, out_features=84, bias=True)
#  (liner_3): Linear(in_features=84, out_features=10, bias=True)
#)

loss_fn = torch.nn.CrossEntropyLoss()  # loss function: expects the unactivated logits as input, and the targets as class indices rather than one-hot vectors
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)  # optimisation: adjust the model parameters according to the computed loss so that the loss decreases

# Training function
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)    # total number of samples; dataloader.dataset is the underlying dataset, while len(dataloader) is the number of batches
    train_loss, correct = 0, 0        # train_loss accumulates the loss over all batches; correct counts correctly predicted samples
    for X, y in dataloader:           # X is the input, y is the target label
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)   # mean loss over the samples in this batch

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()   # argmax(0) would be over the batch dimension; argmax(1) gives the predicted class for each sample
            train_loss += loss.item()
    train_loss /= size       # note: loss.item() is already a per-batch mean, so this divides the sum of batch means by the sample count (hence the small values in the output below)
    correct /= size          # accuracy
    return train_loss, correct

# Test function
def test(dataloader, model):
    size = len(dataloader.dataset)
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= size
    correct /= size

    return test_loss, correct

epochs = 50

train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    epoch_loss, epoch_acc = train(train_dl, model, loss_fn, optimizer)
    epoch_test_loss, epoch_test_acc = test(test_dl, model)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_test_loss)
    test_acc.append(epoch_test_acc)
    
    template = ("epoch:{:2d}, train_loss: {:.5f}, train_acc: {:.1f}% ," 
                "test_loss: {:.5f}, test_acc: {:.1f}%")
    print(template.format(
          epoch, epoch_loss, epoch_acc*100, epoch_test_loss, epoch_test_acc*100))
    
print("Done!")

Output

epoch: 0, train_loss: 0.03585, train_acc: 11.6% ,test_loss: 0.00911, test_acc: 14.9%
epoch: 1, train_loss: 0.03533, train_acc: 21.6% ,test_loss: 0.00894, test_acc: 32.3%
epoch: 2, train_loss: 0.03453, train_acc: 39.7% ,test_loss: 0.00868, test_acc: 46.5%
epoch: 3, train_loss: 0.03322, train_acc: 51.9% ,test_loss: 0.00824, test_acc: 56.3%
epoch: 4, train_loss: 0.03107, train_acc: 58.4% ,test_loss: 0.00754, test_acc: 60.5%
epoch: 5, train_loss: 0.02779, train_acc: 62.8% ,test_loss: 0.00653, test_acc: 64.8%
epoch: 6, train_loss: 0.02359, train_acc: 67.6% ,test_loss: 0.00540, test_acc: 69.9%
epoch: 7, train_loss: 0.01940, train_acc: 72.5% ,test_loss: 0.00440, test_acc: 75.2%
epoch: 8, train_loss: 0.01602, train_acc: 76.8% ,test_loss: 0.00366, test_acc: 78.7%
epoch: 9, train_loss: 0.01354, train_acc: 79.6% ,test_loss: 0.00312, test_acc: 80.9%
epoch:10, train_loss: 0.01178, train_acc: 81.6% ,test_loss: 0.00275, test_acc: 82.6%
epoch:11, train_loss: 0.01053, train_acc: 83.0% ,test_loss: 0.00248, test_acc: 84.0%
epoch:12, train_loss: 0.00961, train_acc: 84.1% ,test_loss: 0.00228, test_acc: 84.9%
epoch:13, train_loss: 0.00892, train_acc: 84.9% ,test_loss: 0.00212, test_acc: 85.7%
epoch:14, train_loss: 0.00838, train_acc: 85.6% ,test_loss: 0.00200, test_acc: 86.4%
epoch:15, train_loss: 0.00796, train_acc: 86.2% ,test_loss: 0.00190, test_acc: 87.0%
epoch:16, train_loss: 0.00761, train_acc: 86.7% ,test_loss: 0.00182, test_acc: 87.5%
epoch:17, train_loss: 0.00732, train_acc: 87.1% ,test_loss: 0.00176, test_acc: 87.8%
epoch:18, train_loss: 0.00708, train_acc: 87.5% ,test_loss: 0.00170, test_acc: 88.2%
epoch:19, train_loss: 0.00687, train_acc: 87.9% ,test_loss: 0.00165, test_acc: 88.5%
epoch:20, train_loss: 0.00669, train_acc: 88.1% ,test_loss: 0.00161, test_acc: 88.8%
epoch:21, train_loss: 0.00653, train_acc: 88.4% ,test_loss: 0.00157, test_acc: 89.1%
epoch:22, train_loss: 0.00639, train_acc: 88.6% ,test_loss: 0.00154, test_acc: 89.3%
epoch:23, train_loss: 0.00626, train_acc: 88.8% ,test_loss: 0.00151, test_acc: 89.4%
epoch:24, train_loss: 0.00615, train_acc: 89.0% ,test_loss: 0.00148, test_acc: 89.5%
epoch:25, train_loss: 0.00605, train_acc: 89.1% ,test_loss: 0.00146, test_acc: 89.7%
epoch:26, train_loss: 0.00596, train_acc: 89.3% ,test_loss: 0.00143, test_acc: 89.8%
epoch:27, train_loss: 0.00587, train_acc: 89.5% ,test_loss: 0.00142, test_acc: 90.0%
epoch:28, train_loss: 0.00578, train_acc: 89.5% ,test_loss: 0.00140, test_acc: 90.0%
epoch:29, train_loss: 0.00571, train_acc: 89.7% ,test_loss: 0.00138, test_acc: 90.2%
epoch:30, train_loss: 0.00564, train_acc: 89.8% ,test_loss: 0.00136, test_acc: 90.3%
epoch:31, train_loss: 0.00557, train_acc: 89.9% ,test_loss: 0.00135, test_acc: 90.3%
epoch:32, train_loss: 0.00551, train_acc: 90.0% ,test_loss: 0.00133, test_acc: 90.3%
epoch:33, train_loss: 0.00545, train_acc: 90.1% ,test_loss: 0.00132, test_acc: 90.4%
epoch:34, train_loss: 0.00540, train_acc: 90.2% ,test_loss: 0.00131, test_acc: 90.5%
epoch:35, train_loss: 0.00534, train_acc: 90.3% ,test_loss: 0.00129, test_acc: 90.5%
epoch:36, train_loss: 0.00529, train_acc: 90.4% ,test_loss: 0.00128, test_acc: 90.6%
epoch:37, train_loss: 0.00524, train_acc: 90.5% ,test_loss: 0.00127, test_acc: 90.8%
epoch:38, train_loss: 0.00520, train_acc: 90.6% ,test_loss: 0.00126, test_acc: 90.8%
epoch:39, train_loss: 0.00515, train_acc: 90.6% ,test_loss: 0.00125, test_acc: 91.0%
epoch:40, train_loss: 0.00511, train_acc: 90.7% ,test_loss: 0.00124, test_acc: 91.0%
epoch:41, train_loss: 0.00507, train_acc: 90.8% ,test_loss: 0.00123, test_acc: 91.1%
epoch:42, train_loss: 0.00502, train_acc: 90.9% ,test_loss: 0.00122, test_acc: 91.2%
epoch:43, train_loss: 0.00498, train_acc: 91.0% ,test_loss: 0.00121, test_acc: 91.2%
epoch:44, train_loss: 0.00495, train_acc: 91.0% ,test_loss: 0.00120, test_acc: 91.3%
epoch:45, train_loss: 0.00491, train_acc: 91.1% ,test_loss: 0.00119, test_acc: 91.3%
epoch:46, train_loss: 0.00487, train_acc: 91.1% ,test_loss: 0.00118, test_acc: 91.4%
epoch:47, train_loss: 0.00484, train_acc: 91.2% ,test_loss: 0.00117, test_acc: 91.5%
epoch:48, train_loss: 0.00480, train_acc: 91.2% ,test_loss: 0.00117, test_acc: 91.6%
epoch:49, train_loss: 0.00477, train_acc: 91.3% ,test_loss: 0.00116, test_acc: 91.6%
Done!
plt.plot(range(1, epochs+1), train_loss, label='train_loss', lw=2)
plt.plot(range(1, epochs+1), test_loss, label='test_loss', lw=2, ls="--")
plt.xlabel('epoch')
plt.legend()
plt.savefig('2-4-3.jpg', dpi=400)
plt.plot(range(1, epochs+1), train_acc, label='train_acc', lw=2)
plt.plot(range(1, epochs+1), test_acc, label='test_acc', lw=2, ls="--")
plt.xlabel('epoch')
plt.legend()
plt.savefig('2-4-4.jpg', dpi=400)

Complete code from the tutorial

import torchvision
from torchvision.transforms import ToTensor
import torch

train_ds = torchvision.datasets.MNIST('data',
                                      train=True,
                                      transform=ToTensor(),
                                      download=True)
                                      
test_ds = torchvision.datasets.MNIST('data',
                                      train=False,
                                      transform=ToTensor(),
                                      download=True)
                                     
# Common image layout: (height, width, channel)
# ToTensor() does three things:

#   1. converts the input to a tensor
#   2. rearranges the image to (channel, height, width)
#   3. scales pixel values to the range [0, 1]

# torch.utils.data.DataLoader wraps the dataset:
#    1. shuffling (shuffle)
#    2. sampling the data into mini-batches (batch_size): mini-batch training
#    3. multi-process loading (num_workers)
#    4. batch-collation function (collate_fn)

train_dl = torch.utils.data.DataLoader(train_ds, 
                                       batch_size=64,
                                       shuffle=True)
                                       
test_dl = torch.utils.data.DataLoader(test_ds, 
                                       batch_size=64)
                                       
imgs, labels = next(iter(train_dl))
imgs.shape  #torch.Size([64, 1, 28, 28])

import matplotlib.pyplot as plt
import numpy as np

plt.figure(figsize=(10, 1))
for i, img in enumerate(imgs[:10]):
    npimg = img.numpy()
    npimg = np.squeeze(npimg)
    plt.subplot(1, 10, i+1)
    plt.imshow(npimg)
    plt.axis('off')
    
from torch import nn
# nn.Linear() is a fully connected layer; it expects flattened input of shape (batch, features)

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear_1 = nn.Linear(28*28, 120)
        self.linear_2 = nn.Linear(120, 84)
        self.linear_3 = nn.Linear(84, 10)
    def forward(self, input):
        x = input.view(-1, 1*28*28)
        x = torch.relu(self.linear_1(x))
        x = torch.relu(self.linear_2(x))
        logits = self.linear_3(x)
        return logits    # the unactivated outputs are called logits
        
# The model outputs scores over the C possible classes, where C is the number of classes, e.g. np.argmax([0.1, 0.2, 0.8])
# logits, e.g. [5, 10, 40]

# Cross entropy compares two probability distributions, e.g. the prediction [0.9, 0.04, 0.06] against the one-hot target [0, 0, 1]
# Define the loss function
loss_fn = torch.nn.CrossEntropyLoss()
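# Illustrative only (not in the original tutorial): the cross entropy between the predicted
# distribution [0.9, 0.04, 0.06] and the one-hot target [0, 0, 1] mentioned above is
# -sum(target * log(pred)) = -log(0.06) ≈ 2.81.
pred_probs = torch.tensor([0.9, 0.04, 0.06])
target_onehot = torch.tensor([0., 0., 1.])
print(-(target_onehot * pred_probs.log()).sum())   # tensor(2.8134)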

# Optimisation: adjust the model parameters according to the computed loss so that the loss decreases; e.g. Adam, SGD
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Model().to(device)   # instantiate the model

opt = torch.optim.SGD(model.parameters(), lr=0.001)

# Training function
def train(dl, model, loss_fn, optimizer):
    size = len(dl.dataset)    
    num_batches = len(dl)
        
    train_loss, correct = 0, 0
        
    for x, y in dl:
        x, y = x.to(device), y.to(device)
        pred = model(x)
        loss = loss_fn(pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
            train_loss += loss.item()
    correct /= size
    train_loss /= num_batches
    return correct, train_loss
    
# Test function
def test(test_dl, model, loss_fn):
    size = len(test_dl.dataset)    
    num_batches = len(test_dl)
    test_loss, correct = 0, 0
    with torch.no_grad():
        for x, y in test_dl:
            x, y = x.to(device), y.to(device)
            pred = model(x)
            loss = loss_fn(pred, y)
            test_loss += loss.item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
        correct /= size
        test_loss /= num_batches
        return correct, test_loss
        
# Train for 50 epochs; one epoch is one full pass over the entire dataset

epochs = 50
train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    epoch_acc, epoch_loss = train(train_dl, model, loss_fn, opt)
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)
    train_acc.append(epoch_acc)
    train_loss.append(epoch_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    
    template = ("epoch:{:2d}, train_Loss:{:.5f}, train_acc:{:.1f},test_Loss:{:.5f}, test_acc:{:.1f}")
    
    print(template.format(epoch, epoch_loss, epoch_acc*100, epoch_test_loss, epoch_test_acc*100))
print('Done')
import matplotlib.pyplot as plt
plt.plot(range(epochs), train_loss, label='train_loss')
plt.plot(range(epochs), test_loss, label='test_loss')
plt.legend()
plt.plot(range(epochs), train_acc, label='train_acc')
plt.plot(range(epochs), test_acc, label='test_acc')
plt.legend()
