nn.Module
torch.nn 是专门为深度学习设计的模块,核心数据结构是Module,既可以表示神经网络中的某个层(layer),也可以表示一个包含很多层的神经网络。
全连接神经网络
用nn.Module实现全连接神经网络,输出y和输入x满足:y = xW + b(代码中为 x.mm(w) + b,即输入在左、权重在右)
import torch as t
from torch import nn
from torch.autograd import Variable as V
class Linear(nn.Module):  # custom layer: subclass nn.Module
    """Fully connected layer computing y = x @ W + b.

    Args:
        in_features: size of each input sample.
        out_features: size of each output sample.
    """

    def __init__(self, in_features, out_features):
        # Idiomatic parent-class init (original called nn.Module.__init__(self)).
        super().__init__()
        # Wrapping tensors in nn.Parameter registers them as learnable
        # parameters, so they show up in named_parameters()/optimizers.
        self.w = nn.Parameter(t.randn(in_features, out_features))
        self.b = nn.Parameter(t.randn(out_features))

    def forward(self, x):
        """Return x @ w + b for x of shape (N, in_features)."""
        y = x.mm(self.w)  # matrix product: (N, in) @ (in, out) -> (N, out)
        # expand_as broadcasts b from (out_features,) up to y's shape.
        return y + self.b.expand_as(y)
# Demo: push a random batch through the custom layer and list its parameters.
layer = Linear(4, 3)  # 4 input features -> 3 output features
# Renamed from `input`, which shadowed the builtin input().
# NOTE: torch.autograd.Variable is deprecated since PyTorch 0.4 — plain
# tensors carry autograd state, so the V(...) wrapper is unnecessary.
x = t.randn(2, 4)
output = layer(x)
print(output)
# Loop variable renamed from `Parameter`, which shadowed nn.Parameter.
for name, parameter in layer.named_parameters():
    print(name, parameter)
输出结果:
多层感知机
由两个全连接层组成,采用sigmoid函数作为激励函数。
import torch as t
from torch import nn
from torch.autograd import Variable as V
class Linear(nn.Module):  # custom layer: subclass nn.Module
    """Fully connected layer computing y = x @ W + b.

    Bug fix: this redefinition originally omitted forward(), so calling an
    instance (as Perceptron does via self.layer1(x)) would raise
    NotImplementedError. forward() is restored to match the earlier Linear.
    """

    def __init__(self, in_features, out_features):
        super().__init__()  # idiomatic call to nn.Module's constructor
        self.w = nn.Parameter(t.randn(in_features, out_features))
        self.b = nn.Parameter(t.randn(out_features))

    def forward(self, x):
        """Return x @ w + b for x of shape (N, in_features)."""
        y = x.mm(self.w)
        return y + self.b.expand_as(y)
class Perceptron(nn.Module):
    """Two-layer perceptron: Linear -> sigmoid -> Linear.

    Args:
        in_features: input dimension.
        hidden_features: hidden-layer dimension.
        out_features: output dimension.
    """

    def __init__(self, in_features, hidden_features, out_features):
        super().__init__()
        # Reuse the custom fully connected layer defined earlier in the file.
        self.layer1 = Linear(in_features, hidden_features)
        self.layer2 = Linear(hidden_features, out_features)

    def forward(self, x):
        # sigmoid activation between the two affine layers
        hidden = t.sigmoid(self.layer1(x))
        return self.layer2(hidden)
# Demo: build a 3 -> 4 -> 1 perceptron and list each parameter's shape
# (layer1.w, layer1.b, layer2.w, layer2.b).
perceptron = Perceptron(3, 4, 1)
for name, param in perceptron.named_parameters():
    print(name, param.size())
输出结果:
网友评论