PyTorch Linear Regression
import torch
import numpy as np
The underlying relationship is y = -5*x + 3
x_train = torch.unsqueeze(torch.linspace(-10, 10, 100), dim=1)  # 100 evenly spaced points in [-10, 10], shaped (100, 1)
y_train = -5 * x_train + 3 + torch.randn(x_train.size())        # targets from y = -5x + 3 plus Gaussian noise
print("x_train[:10]:{}".format(x_train[:10]))
print("y_train[:10]:{}".format(y_train[:10]))
x_train[:10]:tensor([[-10.0000],
        [ -9.7980],
        [ -9.5960],
        [ -9.3939],
        [ -9.1919],
        [ -8.9899],
        [ -8.7879],
        [ -8.5859],
        [ -8.3838],
        [ -8.1818]])
y_train[:10]:tensor([[51.5397],
        [52.7127],
        [52.1205],
        [50.8858],
        [47.1663],
        [47.2642],
        [47.2564],
        [45.4092],
        [46.2278],
        [43.5305]])
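Because torch.randn adds fresh Gaussian noise on every run, the exact numbers above will differ between executions. If reproducible data is wanted, the random seed can be fixed before generating the targets; a minimal sketch, not part of the original code:
# Fix the random seed so the noisy targets are reproducible (optional)
torch.manual_seed(0)
x_train = torch.unsqueeze(torch.linspace(-10, 10, 100), dim=1)
y_train = -5 * x_train + 3 + torch.randn(x_train.size())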
Create the model
class myLinearReg(torch.nn.Module):
    def __init__(self):
        super(myLinearReg, self).__init__()
        self.fc = torch.nn.Linear(1, 1)  # (1, 1): one input feature, one output feature
    def forward(self, x):
        return self.fc(x)
model = myLinearReg()
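For reference, the same one-layer model could also be built with torch.nn.Sequential; this alternative is only a sketch and not part of the original post:
# Equivalent single-layer model using nn.Sequential (alternative sketch)
seq_model = torch.nn.Sequential(
    torch.nn.Linear(1, 1)  # the same single weight and bias as self.fc above
)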
Define the loss function and optimizer
loss = torch.nn.MSELoss()
# define the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
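MSELoss averages the squared differences between predictions and targets; the small check below (my addition, not from the original post) compares it against a hand-computed mean squared error:
# Hand-computed mean squared error, for comparison with torch.nn.MSELoss
with torch.no_grad():
    y_check = model(x_train)
    manual_mse = ((y_check - y_train) ** 2).mean()
    print(manual_mse, loss(y_check, y_train))  # the two values should agree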
Training
epochs = 1000
loss_list = []
epoch_list = []
for epoch in range(epochs):
    # forward pass
    y_hat = model(x_train)
    loss_ = loss(y_hat, y_train)
    # backward pass and parameter update
    optimizer.zero_grad()
    loss_.backward()
    optimizer.step()
    if (epoch + 1) % 20 == 0:
        print("Epoch:{}/{},loss:{:.5f}".format(epoch + 1, epochs, loss_.detach().numpy()))
        loss_list.append(loss_.detach().numpy())
        epoch_list.append(epoch + 1)
Epoch:20/1000,loss:910.51935
Epoch:40/1000,loss:695.19537
............................
Epoch:1000/1000,loss:6.75332
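Once training has converged, the model can be queried on new inputs. A quick sketch (the input value 2.0 is only an illustrative choice, not from the original post):
# Predict y for a new x value; with y = -5*x + 3 the answer should be close to -7
with torch.no_grad():
    x_new = torch.tensor([[2.0]])  # shape (1, 1), matching the training data
    print(model(x_new))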
Visualization
import matplotlib.pyplot as plt
%matplotlib
plt.scatter(x_train.numpy(), y_train.numpy(), label="original data", c='g', linewidth=1)
# switch the model to evaluation mode
model.eval()
y_pred = model(x_train)
plt.plot(x_train.numpy(), y_pred.detach().numpy(), c='r', label='fit', linewidth=1)
plt.legend()  # show the "original data" / "fit" labels
plt.show()

(plot.png: scatter of the training data in green with the fitted line in red)
plt.scatter(epoch_list, loss_list, label="loss", linewidth=1)
plt.legend()
plt.show()

(loss.png: training loss plotted against epoch)
list(model.named_parameters())
[('fc.weight',
  Parameter containing:
  tensor([[-4.9977]], requires_grad=True)),
 ('fc.bias',
  Parameter containing:
  tensor([0.5304], requires_grad=True))]
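The learned weight (about -4.9977) is very close to the true slope -5, while the bias (about 0.53) has not yet reached the true intercept 3; with such a small learning rate it would need more epochs to get there. If the parameters are wanted as plain Python floats, they can be read out like this (a minimal sketch):
# Read the learned weight and bias out as Python floats
w = model.fc.weight.item()  # roughly -5
b = model.fc.bias.item()    # roughly 0.53 after 1000 epochs
print("y = {:.4f}*x + {:.4f}".format(w, b))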