Chapter 3.7 ("Concise Implementation of Softmax Regression") of the PyTorch edition of Dive into Deep Learning (《动手学深度学习》) omits the code that moves the model and the data onto the GPU, so running the book's code as-is raises an error. Following an article from 机器之心 Pro, the completed code is given below; it runs in a Jupyter notebook, recorded here for reference.
import torch
from torch import nn
from torch.nn import init # initializer
import numpy as np
# import sys  # can be used to append a package's path to the module search path
# sys.path.append('..')
import d2lzh_pytorch as d2l
##################################
# Prefer the GPU for computation when one is available
def get_default_device():
    """Pick GPU if available, else CPU"""
    return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device = get_default_device()
print(device)
##################################
# Move data (tensors, or lists/tuples of tensors) to the target device
def to_device(data, device):
    """Move tensor(s) to the chosen device"""
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    return data.to(device, non_blocking=True)
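##################################
# Quick check (my addition, not from the original post): because to_device
# recurses into lists and tuples, a (features, labels) batch is moved as a
# whole. The tensors below are made up purely for illustration.
xb, yb = to_device((torch.randn(2, 3), torch.tensor([0, 1])), device)
print(xb.device, yb.device)  # both report the chosen device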
##################################
# Wrapper class that loads each batch onto the device
class DeviceDataLoader():
    """Wrap a dataloader to move data to a device"""
    def __init__(self, dl, device):
        self.dl = dl
        self.device = device
    def __iter__(self):
        """Yield a batch of data after moving it to the device"""
        for batch in self.dl:
            yield to_device(batch, self.device)
    def __len__(self):
        """Number of batches"""
        return len(self.dl)
##################################
# Load the data
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
# Wrap the data loaders so batches land on the device
train_iter = DeviceDataLoader(train_iter, device)
test_iter = DeviceDataLoader(test_iter, device)
# Read one batch as a check
for X, y in train_iter:
    print('X.device', X.device)  # print the device rather than X itself, which is too large
    print('y', y)  # inspect the labels
    break
##################################
# Define and initialize the model
num_inputs = 784
num_outputs = 10
class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)  # the fully connected layer's parameters are determined by the input/output sizes
    def forward(self, x):  # x.shape: (batch, 1, 28, 28)
        y = self.linear(x.view(x.shape[0], -1))  # reshape to (batch_size, 784) before the fully connected layer
        return y
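##################################
# Optional sanity check (my addition): a forward pass on a dummy batch
# confirms the reshape inside LinearNet.forward. Note that the Sequential
# model built below is the one actually trained.
tmp_net = LinearNet(num_inputs, num_outputs).to(device)
print(tmp_net(torch.randn(4, 1, 28, 28, device=device)).shape)  # torch.Size([4, 10])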
##################################
# Factor the reshaping of x into its own FlattenLayer
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()
    def forward(self, x):  # x shape: (batch, *, *, ...)
        return x.view(x.shape[0], -1)
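##################################
# Optional shape check (my addition): FlattenLayer keeps the batch dimension
# and flattens the rest, so (256, 1, 28, 28) becomes (256, 784).
print(FlattenLayer()(torch.zeros(256, 1, 28, 28)).shape)  # torch.Size([256, 784])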
##################################
from collections import OrderedDict
net = nn.Sequential(
    # FlattenLayer(),
    # nn.Linear(num_inputs, num_outputs)
    OrderedDict([
        ('flatten', FlattenLayer()),
        ('linear', nn.Linear(num_inputs, num_outputs))
    ])
)
net = net.to(device)  # move the model onto the GPU as well
##################################
# Initialize the parameters
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
##################################
# The softmax operation and the cross-entropy loss are fused into a single function
loss = nn.CrossEntropyLoss()
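##################################
# Small check (my addition) of why the model needs no explicit softmax:
# CrossEntropyLoss applies log-softmax internally, so on made-up logits it
# matches LogSoftmax followed by NLLLoss.
logits = torch.randn(3, num_outputs)
labels = torch.tensor([1, 4, 9])
manual = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), labels)
print(torch.allclose(loss(logits, labels), manual))  # True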
##################################
# Define the optimizer: stochastic gradient descent
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
##################################
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
The training and prediction output is as follows:
epoch 1, loss 0.0031, train acc 0.751, test acc 0.791
epoch 2, loss 0.0022, train acc 0.813, test acc 0.790
epoch 3, loss 0.0021, train acc 0.825, test acc 0.792
epoch 4, loss 0.0020, train acc 0.832, test acc 0.823
epoch 5, loss 0.0019, train acc 0.837, test acc 0.827
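For reference, here is a rough sketch of what d2l.train_ch3 does in this setup (my reconstruction, not the package's exact code): one SGD step per batch, then the average loss and accuracies per epoch.
##################################
def train_sketch(net, train_iter, test_iter, loss, num_epochs, optimizer):
    for epoch in range(num_epochs):
        train_loss, train_acc, n = 0.0, 0.0, 0
        for X, y in train_iter:  # DeviceDataLoader has already moved X, y to the device
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_loss += l.item() * y.shape[0]
            train_acc += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc, m = 0.0, 0
        with torch.no_grad():  # evaluation needs no gradients
            for X, y in test_iter:
                test_acc += (net(X).argmax(dim=1) == y).sum().item()
                m += y.shape[0]
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_loss / n, train_acc / n, test_acc / m))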