Chapter 05 Backpropagation


Author: 蜜糖_7474 | Published 2019-03-01 17:52

    This section implements the earlier apple-purchase example in Python. Here, the multiplication node of the computational graph is implemented as a "multiplication layer" (MulLayer), and the addition node as an "addition layer" (AddLayer).


    (Figure: 购买2个苹果.png, computational graph for buying 2 apples)

    Implementation of the multiplication layer

    class MulLayer:
        def __init__(self):
            self.x = None
            self.y = None
    
        def forward(self, x, y):
            self.x = x
            self.y = y
            return x * y
    
        def backward(self, dout):
            # for z = x * y, dz/dx = y and dz/dy = x, so the saved inputs are "swapped"
            dx = dout * self.y
            dy = dout * self.x
            return dx, dy
    
    apple = 100
    apple_num = 2
    tax = 1.1
    
    #layer
    mul_apple_layer = MulLayer()
    mul_tax_layer = MulLayer()
    
    #forward
    apple_price = mul_apple_layer.forward(apple, apple_num)
    price = mul_tax_layer.forward(apple_price, tax)
    
    print(price)
    
    #backward
    dprice = 1
    dapple_price, dtax = mul_tax_layer.backward(dprice)
    dapple, dapple_num = mul_apple_layer.backward(dapple_price)
    print(dapple, dapple_num, dtax)
    

    220.00000000000003
    2.2 110.00000000000001 200

    The backward pass returns the derivative of the final price with respect to each input: dapple = 2.2 (per yen of apple price), dapple_num = 110 (per apple), and dtax = 200 (per unit of tax rate).

    Implementation of the addition layer

    class AddLayer:
        def __init__(self):
            pass

        def forward(self, x, y):
            out = x + y
            return out

        def backward(self, dout):
            # addition passes the upstream derivative through to both inputs unchanged
            dx = dout * 1
            dy = dout * 1
            return dx, dy
    

    Using the addition layer and the multiplication layer, implement the example shown in the figure below: buying 2 apples and 3 oranges.

    (Figure: 买苹果橘子.png, computational graph for buying 2 apples and 3 oranges)

    apple = 100
    apple_num = 2
    orange = 150
    orange_num = 3
    tax = 1.1
    
    #layer
    mul_apple_layer = MulLayer()
    mul_orange_layer = MulLayer()
    add_apple_orange_layer = AddLayer()
    mul_tax_layer = MulLayer()
    
    #forward
    apple_price = mul_apple_layer.forward(apple, apple_num)
    orange_price = mul_orange_layer.forward(orange, orange_num)
    all_price = add_apple_orange_layer.forward(apple_price, orange_price)
    price = mul_tax_layer.forward(all_price, tax)
    
    #backward
    dprice = 1
    dall_price, dtax = mul_tax_layer.backward(dprice)
    dapple_price, dorange_price = add_apple_orange_layer.backward(dall_price)
    dapple, dapple_num = mul_apple_layer.backward(dapple_price)
    dorange, dorange_num = mul_orange_layer.backward(dorange_price)
    
    print(price)
    print(dapple_num, dapple, dorange, dorange_num, dtax)
    

    715.0000000000001
    110.00000000000001 2.2 3.3000000000000003 165.0 650

    Implementation of the activation function layers

    import numpy as np


    class ReLU:
        def __init__(self):
            self.mask = None

        def forward(self, x):
            # remember which elements were <= 0; backward zeroes the same positions
            self.mask = (x <= 0)
            out = x.copy()
            out[self.mask] = 0
            return out
    
        def backward(self, dout):
            dout[self.mask] = 0
            return dout
    
    
    class Sigmoid:
        def __init__(self):
            self.out = None
    
        def forward(self, x):
            out = 1 / (1 + np.exp(-x))
            self.out = out
            return out
    
        def backward(self, dout):
            # dL/dx = dout * y * (1 - y), using the output y saved during forward
            return dout * (1.0 - self.out) * self.out
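
    A quick sanity check of the two activation layers (a minimal sketch; the input values below are made up for illustration):

    x = np.array([[1.0, -0.5], [-2.0, 3.0]])
    relu = ReLU()
    print(relu.forward(x))                    # [[1. 0.] [0. 3.]], negative entries are zeroed
    print(relu.backward(np.ones_like(x)))     # the gradient is blocked where x <= 0

    sigmoid = Sigmoid()
    y = sigmoid.forward(x)
    print(sigmoid.backward(np.ones_like(x)))  # dout * y * (1 - y)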
    

    Affine layer

    The matrix product computed in the forward propagation of a neural network is called an "affine transformation" in the field of geometry. Therefore, the processing that performs the affine transformation is implemented here as an "Affine layer".

    class Affine:
        def __init__(self, W, b):
            self.W = W
            self.b = b
            self.x = None
            self.dW = None
            self.db = None
    
        def forward(self, x):
            self.x = x
            out = np.dot(x, self.W) + self.b
            return out
    
        def backward(self, dout):
            dx = np.dot(dout, self.W.T)
            self.dW = np.dot(self.x.T, dout)
            self.db = np.sum(dout, axis=0)
            return dx
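
    A minimal shape check for the Affine layer (the sizes below are illustrative, not from the text):

    x = np.random.randn(2, 4)   # batch of 2 samples, 4 input features
    W = np.random.randn(4, 3)   # weight matrix: 4 inputs -> 3 outputs
    b = np.zeros(3)
    affine = Affine(W, b)
    out = affine.forward(x)     # shape (2, 3)
    dx = affine.backward(np.ones_like(out))
    print(out.shape, dx.shape, affine.dW.shape, affine.db.shape)
    # (2, 3) (2, 4) (4, 3) (3,)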
    

    Softmax-with-Loss layer

    def softmax(x):
        if x.ndim == 2:
            x = x.T
            x = x - np.max(x, axis=0)
            y = np.exp(x) / np.sum(np.exp(x), axis=0)
            return y.T
    
        x = x - np.max(x)  # subtract the max to avoid overflow
        return np.exp(x) / np.sum(np.exp(x))
    
    
    def cross_entropy_error(y, t):
        if y.ndim == 1:
            t = t.reshape(1, t.size)
            y = y.reshape(1, y.size)
    
        # if the teacher data is a one-hot vector, convert it to the index of the correct label
        if t.size == y.size:
            t = t.argmax(axis=1)
    
        batch_size = y.shape[0]
        return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
    
    
    class SoftmaxWithLoss:
        def __init__(self):
            self.loss = None
            self.y = None
            self.t = None
    
        def forward(self, x, t):
            self.t = t
            self.y = softmax(x)
            self.loss = cross_entropy_error(self.y, self.t)
            return self.loss
    
        def backward(self, dout=1):
            # note: this simple form assumes t is given as a one-hot vector
            batch_size = self.t.shape[0]
            dx = (self.y - self.t) / batch_size
            return dx
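
    A minimal check of the Softmax-with-Loss layer (the scores and one-hot label below are made up):

    x = np.array([[0.3, 2.9, 4.0]])   # raw scores for one sample
    t = np.array([[0.0, 0.0, 1.0]])   # one-hot label: class 2 is correct
    last_layer = SoftmaxWithLoss()
    print(last_layer.forward(x, t))   # cross-entropy loss, roughly 0.3 here
    print(last_layer.backward())      # (y - t) / batch_size, shape (1, 3)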
    

    Implementation of a neural network using backpropagation

    # compute the gradient by numerical differentiation
    def numerical_gradient(f, x):
        h = 1e-4 # 0.0001
        grad = np.zeros_like(x)
        
        it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            tmp_val = x[idx]
            x[idx] = float(tmp_val) + h
            fxh1 = f(x) # f(x+h)
            
            x[idx] = tmp_val - h 
            fxh2 = f(x) # f(x-h)
            grad[idx] = (fxh1 - fxh2) / (2*h)
            
            x[idx] = tmp_val  # restore the original value
            it.iternext()   
            
        return grad
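
    # quick sanity check (illustrative, not from the text): for f(x) = x0**2 + x1**2
    # the analytic gradient at (3.0, 4.0) is (6.0, 8.0), and numerical_gradient
    # returns approximately the same values:
    # print(numerical_gradient(lambda x: np.sum(x ** 2), np.array([3.0, 4.0])))  # -> [6. 8.]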
    
    
    from collections import OrderedDict


    class TwoLayerNet:
        def __init__(self,input_size,hidden_size,output_size,weight_init_std=0.01):
            # initialize the weights
            self.params={}
            self.params['W1']=weight_init_std*np.random.randn(input_size,hidden_size)
            self.params['b1']=np.zeros(hidden_size)
            self.params['W2']=weight_init_std*np.random.randn(hidden_size,output_size)
            self.params['b2']=np.zeros(output_size)
            
            # create the layers
            self.layers=OrderedDict()
            self.layers['Affine1']=Affine(self.params['W1'],self.params['b1'])
            self.layers['ReLU']=ReLU()
            self.layers['Affine2']=Affine(self.params['W2'],self.params['b2'])
            self.lastLayer=SoftmaxWithLoss()
            
        def predict(self,x):
            for layer in self.layers.values():
                x=layer.forward(x)
            return x
        
        # x: input data, t: teacher data
        def loss(self,x,t):
            y=self.predict(x)
            return self.lastLayer.forward(y,t)
        
        def accuracy(self,x,t):
            y=self.predict(x)
            y=np.argmax(y,axis=1)
            if t.ndim != 1:
                t=np.argmax(t,axis=1)
            accuracy=np.sum(t==y)/float(x.shape[0])
            return accuracy
        
        # compute gradients by numerical differentiation
        def numerical_gradient(self, x, t):
            loss_W = lambda W: self.loss(x, t)
            grads = {}
            grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
            grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
            grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
            grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
            return grads
    
        # compute gradients by backpropagation
        def gradient(self,x,t):
            self.loss(x,t)
            dout=1
            dout=self.lastLayer.backward(dout)
            layers = list(self.layers.values())
            layers.reverse()
            for layer in layers:
                dout = layer.backward(dout)
            grads = {}
            grads['W1'] = self.layers['Affine1'].dW
            grads['b1'] = self.layers['Affine1'].db
            grads['W2'] = self.layers['Affine2'].dW
            grads['b2'] = self.layers['Affine2'].db
            return grads
    

    Gradient check for backpropagation

    # load_mnist comes from the book's companion source code (dataset/mnist.py)
    from dataset.mnist import load_mnist

    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
    network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
    x_batch = x_train[:3]
    t_batch = t_train[:3]
    grad_numerical = network.numerical_gradient(x_batch, t_batch)
    grad_backprop = network.gradient(x_batch, t_batch)
    # average absolute error between the two gradients for each weight
    for key in grad_numerical.keys():
        diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]) )
        print(key + ":" + str(diff))
    

    W1:4.988111386706321e-10
    b1:2.9053235452690136e-09
    W2:7.455528358404354e-09
    b2:1.3991754635733767e-07
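
    The near-zero differences confirm that the backpropagation gradients match numerical differentiation. With that confirmed, backpropagation can replace the much slower numerical gradient during training. A minimal sketch of a single SGD update step (the learning rate below is illustrative):

    learning_rate = 0.1
    grad = network.gradient(x_batch, t_batch)       # gradients via backpropagation
    for key in ('W1', 'b1', 'W2', 'b2'):
        # in-place update; the Affine layers hold references to the same arrays
        network.params[key] -= learning_rate * grad[key]
    print(network.loss(x_batch, t_batch))           # the loss should decrease slightly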
