PyTorch Learning Notes 4: A Simple Linear Regression Example

Author: 深思海数_willschang | Published 2020-02-08 16:22

This post walks through a simple linear regression example implemented in PyTorch.



The code is as follows:

import numpy as np
import torch
import torch.utils.data as Data
import torch.nn as nn
from torch.nn import init
import torch.optim as optim


num_inputs = 2
num_samples = 1000
# Ground-truth parameters to recover: y = 2*x1 - 3.4*x2 + 4.2
demo_w = [2, -3.4]
demo_b = 4.2

# Feature data: standard-normal samples, shape (num_samples, num_inputs)
features = torch.tensor(
                    np.random.normal(0, 1, (num_samples, num_inputs)), dtype=torch.float
                )
# y = w1*x1 + w2*x2 + b
labels = demo_w[0] * features[:, 0] + demo_w[1] * features[:, 1] + demo_b
# Add Gaussian noise
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
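# Optional sanity check: features is (1000, 2), labels is (1000,)
print(features.shape, labels.shape)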

# Define the batch size
batch_size = 10
# Combine the training features and labels into a dataset
dataset = Data.TensorDataset(features, labels)
# Read random mini-batches from the dataset
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
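# For larger datasets, DataLoader can also prefetch batches in worker
# processes, e.g. Data.DataLoader(dataset, batch_size, shuffle=True, num_workers=2)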

# Peek at one mini-batch to inspect the data structure
for X, y in data_iter:
    print(X, y)
    break
"""
tensor([[-0.7070, -1.6496],
        [-0.2716, -0.4995],
        [ 0.9433,  0.6106],
        [-0.0870, -0.9034],
        [-0.8537, -0.6853],
        [-0.2841,  0.8076],
        [-0.3477,  0.4447],
        [-0.6278,  1.1270],
        [-0.0032,  2.2277],
        [-0.3173,  0.0662]]) tensor([ 8.4213,  5.3458,  3.9993,  7.0978,  4.8204,  0.8734,  1.9918, -0.8872,
        -3.3933,  3.3439])
"""
    
# Define the model
net = nn.Sequential(
    nn.Linear(num_inputs, 1)
)
print(net)
"""
Sequential(
  (0): Linear(in_features=2, out_features=1, bias=True)
)
"""

"""其他方式搭建网络
# 1  ???
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)
        
    def forward(self, x):
        y = self.linear(x)
        return y

# 2: add named modules to an empty Sequential
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))

# 3: pass an OrderedDict of named modules
from collections import OrderedDict
net = nn.Sequential(OrderedDict([
    ('linear', nn.Linear(num_inputs, 1))
]))
"""

# All learnable parameters can be inspected via net.parameters()
for param in net.parameters():
    print(param)
"""
Parameter containing:
tensor([[-0.2307, -0.5035]], requires_grad=True)
Parameter containing:
tensor([0.0936], requires_grad=True)
"""
# print(net[0])
    
# Initialize the model parameters: weights ~ N(0, 0.01), bias = 0
init.normal_(net[0].weight, mean=0, std=0.01)
init.constant_(net[0].bias, val=0)  # equivalently: net[0].bias.data.fill_(0)
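# The trailing underscore in normal_/constant_ follows PyTorch's convention
# for in-place operations: they overwrite the parameter tensors directly.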

# Define the loss function: mean squared error
loss = nn.MSELoss()
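# Note: MSELoss averages the squared error over all elements by default
# (reduction='mean'); reduction='sum' and reduction='none' are also available.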

# Define the optimization algorithm: mini-batch SGD
optimizer = optim.SGD(net.parameters(), lr=0.03)
print(optimizer)
"""
SGD (
Parameter Group 0
    dampening: 0
    lr: 0.03
    momentum: 0
    nesterov: False
    weight_decay: 0
)
"""

# Train the model
num_epochs = 5
for epoch in range(1, num_epochs+1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))  # reshape y to (batch, 1) to match net(X)
        optimizer.zero_grad()  # zero the gradients; net.zero_grad() also works
        l.backward()
        optimizer.step()
        
    print('epoch {}, loss: {}'.format(epoch, l.item()))
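# Note: the printed loss is that of the last mini-batch in each epoch,
# not the average loss over the whole epoch.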
    
# Compare the learned parameters with the ground truth
dense = net[0]
print(demo_w, dense.weight)
print(demo_b, dense.bias)
"""
[2, -3.4] Parameter containing:
tensor([[ 2.0001, -3.4005]], requires_grad=True)
4.2 Parameter containing:
tensor([4.2010], requires_grad=True)
"""
