
Neural Networks 04

Author: 平头哥2 | Published 2020-09-21 11:18

    Gradient descent

    Extract the shared helper functions into a module: common_functions.py

    import numpy as np
    
    def sigmoid(x):
        return 1/(1 + np.exp(-x))
    
    def softmax(a):
        if a.ndim == 2:  # batch input: normalize each row independently
            a = a - np.max(a, axis=1, keepdims=True)  # subtract the max to prevent overflow
            exp_a = np.exp(a)
            return exp_a / np.sum(exp_a, axis=1, keepdims=True)
        c = np.max(a)  # subtract the max to prevent overflow
        exp_a = np.exp(a - c)
        sum_exp_a = np.sum(exp_a)
        y = exp_a / sum_exp_a
        return y
    
    
    def cross_entropy_error(y, t):
        if y.ndim == 1:
            t = t.reshape(1, t.size)
            y = y.reshape(1, y.size)
    
        batch_size = y.shape[0]
        return -np.sum(t * np.log(y + 1e-7)) / batch_size
    
    
    def numeric_diff(f, x):
        h = 1e-4  # 0.0001
        return (f(x + h) - f(x - h)) / (2 * h)
    
    
    def numeric_gradient(f, x):
        h = 1e-4  # 0.0001
        grad = np.zeros_like(x)  # array with the same shape as x

        # iterate over every element, so this also works for multi-dimensional arrays (e.g. weight matrices)
        it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            tmp_val = x[idx]

            # compute f(x+h)
            x[idx] = tmp_val + h
            fxh1 = f(x)

            # compute f(x-h)
            x[idx] = tmp_val - h
            fxh2 = f(x)

            grad[idx] = (fxh1 - fxh2) / (2 * h)
            x[idx] = tmp_val  # restore the original value
            it.iternext()

        return grad
    
    
    def gradient_descent(f, init_x, lr=0.01, step_num=100):
        x = init_x
        for i in range(step_num):
            grad = numeric_gradient(f, x)
            x -= lr * grad
    
        return x
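
    As a quick sanity check of numeric_diff (a minimal sketch; function_1 and the sample points are only illustrative), the numerical derivative should be close to the analytic one, 0.02x + 0.1:

    from common_functions import numeric_diff

    def function_1(x):
        return 0.01 * x ** 2 + 0.1 * x

    print(numeric_diff(function_1, 5))   # approx. 0.2
    print(numeric_diff(function_1, 10))  # approx. 0.3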
    
    

    Using the gradient method to find the minimum of a function

    import numpy as np
    from common_functions import gradient_descent
    def function_2(x):
        return x[0] ** 2 + x[1] ** 2
    init_x = np.array([-3.0, 4.0])
    print(gradient_descent(function_2, init_x = init_x, lr = 0.1)) # [-6.11110793e-10  8.14814391e-10]
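
    The learning rate matters here: if it is too large the search diverges, and if it is too small the parameters barely move. A quick illustration reusing function_2 (a sketch; the two lr values are deliberately extreme):

    print(gradient_descent(function_2, init_x=np.array([-3.0, 4.0]), lr=10.0))    # diverges to huge values
    print(gradient_descent(function_2, init_x=np.array([-3.0, 4.0]), lr=1e-10))   # stays almost at the start point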
    

    Gradients of a neural network

    Here the gradient means the gradient of the loss function with respect to the weight parameters.

    For example, denote the loss function by L and the weight parameters by W:
    W = \left[ \begin{matrix} \omega_{11} & \omega_{12} & \omega_{13}\\ \omega_{21} & \omega_{22} & \omega_{23} \\ \end{matrix} \right]

    \frac{\partial L}{\partial W} = \left[ \begin{matrix} \frac{\partial L}{\partial \omega_{11}} & \frac{\partial L}{\partial \omega_{12}} & \frac{\partial L}{\partial \omega_{13}}\\ \frac{\partial L}{\partial \omega_{21}} & \frac{\partial L}{\partial \omega_{22}} & \frac{\partial L}{\partial \omega_{23}} \\ \end{matrix} \right]
    Each element of \frac{\partial L}{\partial W} is the partial derivative of L with respect to the corresponding weight, so the gradient has the same shape as W. Define a class and compute its gradient:

    import numpy as np
    from common_functions import softmax, cross_entropy_error, numeric_gradient
    
    
    class simpleNet:
    
        def __init__(self):
            self.W = np.random.randn(2, 3)
    
        def predict(self, x):
            return np.dot(x, self.W)
    
        def loss(self, x, t):
            z = self.predict(x)
            y = softmax(z)
            loss = cross_entropy_error(y, t)
    
            return loss
    

    Test code

    import numpy as np

    from simpleNet import simpleNet  # import the class defined above
    from common_functions import numeric_gradient
    
    net = simpleNet()
    
    # print(net) # <simpleNet.simpleNet object at 0x000001B917A2D940>
    
    
    print(net.W)
    # [[-0.35629671  0.13281832 -0.30492983]
    #  [ 0.4057684  -0.61784676  2.64085429]]
    
    
    x = np.array([0.6, 0.9])
    
    p = net.predict(x)
    
    print(p)  # [-0.7061077   1.25578435 -1.02561033]
    
    print(np.argmax(p))  # 2
    
    t = np.array([0, 0, 1])
    
    print(net.loss(x, t))  # 0.3955737658935095
    
    
    f = lambda w: net.loss(x, t)  # dummy argument w; numeric_gradient perturbs net.W in place
    
    print(f) # <function <lambda> at 0x000001B9194D3280>
    
    dW = numeric_gradient(f, net.W)
    
    print(dW)
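
    dW has the same shape as net.W, and each element says how the loss changes when the corresponding weight is nudged. As a quick check (a sketch; the step size 0.1 is an arbitrary choice), one gradient-descent update on net.W should lower the loss:

    net.W -= 0.1 * dW
    print(net.loss(x, t))  # slightly smaller than the loss printed above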
    

    Steps of neural network learning

    1. Randomly select part of the training data (called a mini-batch); the goal is to reduce the value of the loss function on this mini-batch
    2. To reduce the mini-batch loss, compute the gradient of each weight parameter; the gradient points in the direction in which the loss decreases fastest
    3. Update the weight parameters by a small step along the gradient direction
    4. Repeat steps 1, 2 and 3 (one iteration is sketched below)
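
    Written schematically, one iteration of this loop looks like the following sketch (the names match the TwoLayerNet class and the mini-batch script defined below; x_train, t_train, train_size, batch_size and learning_rate are assumed to be set up as in that script):

    # step 1: sample a mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch, t_batch = x_train[batch_mask], t_train[batch_mask]

    # step 2: gradient of the mini-batch loss w.r.t. every weight and bias
    grads = network.numerical_gradient(x_batch, t_batch)

    # step 3: small update of each parameter along the negative gradient
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.param[key] -= learning_rate * grads[key]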

    The class for a 2-layer neural network (TwoLayerNet)

    import numpy as np
    from common_functions import sigmoid, softmax, cross_entropy_error, numeric_gradient


    class TwoLayerNet:

        def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
            # initialize the weights with small random values and the biases with zeros
            self.param = {}
            self.param['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
            self.param['b1'] = np.zeros(hidden_size)
            self.param['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
            self.param['b2'] = np.zeros(output_size)

        def predict(self, x):
            W1, W2 = self.param['W1'], self.param['W2']
            b1, b2 = self.param['b1'], self.param['b2']

            a1 = np.dot(x, W1) + b1
            z1 = sigmoid(a1)
            a2 = np.dot(z1, W2) + b2
            y = softmax(a2)
            return y

        # x: input data, t: supervision (label) data
        def loss(self, x, t):
            y = self.predict(x)
            return cross_entropy_error(y, t)

        def accuracy(self, x, t):
            y = self.predict(x)
            y = np.argmax(y, axis=1)
            t = np.argmax(t, axis=1)
            accuracy = np.sum(y == t) / float(x.shape[0])
            return accuracy

        # x: input data, t: supervision (label) data
        def numerical_gradient(self, x, t):
            loss_W = lambda W: self.loss(x, t)  # dummy argument W; numeric_gradient perturbs the parameters in place
            grads = {}
            grads['W1'] = numeric_gradient(loss_W, self.param['W1'])
            grads['b1'] = numeric_gradient(loss_W, self.param['b1'])
            grads['W2'] = numeric_gradient(loss_W, self.param['W2'])
            grads['b2'] = numeric_gradient(loss_W, self.param['b2'])
            return grads
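
    As a quick shape check (a sketch with small dummy sizes, since the numerical gradient is expensive to evaluate), each gradient array returned by numerical_gradient matches the shape of the corresponding parameter:

    net = TwoLayerNet(input_size=4, hidden_size=3, output_size=2)
    x = np.random.rand(5, 4)        # 5 dummy input vectors
    t = np.zeros((5, 2))
    t[np.arange(5), 0] = 1          # dummy one-hot labels
    grads = net.numerical_gradient(x, t)
    print(grads['W1'].shape)  # (4, 3)
    print(grads['b2'].shape)  # (2,)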
    
    

    Implementing mini-batch learning

    import sys, os
    
    print(os.getcwd())
    sys.path.append(os.getcwd())
    
    from mnist import load_mnist
    import numpy as np
    
    from two_layer_net import TwoLayerNet  # import the class defined above
    
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=False, one_hot_label=True)
    
    train_loss_list = []
    # hyperparameters
    iters_num = 10000
    train_size = x_train.shape[0]
    batch_size = 100
    learning_rate = 0.1
    
    network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
    
    for i in range(iters_num):
        # get a mini-batch of data
        batch_mask = np.random.choice(train_size, batch_size)
        x_batch = x_train[batch_mask]
        t_batch = t_train[batch_mask]

        # compute the gradients
        grad = network.numerical_gradient(x_batch, t_batch)

        # update the parameters
        for key in ['W1', 'b1', 'W2', 'b2']:
            network.param[key] -= learning_rate * grad[key]

        # record the learning progress
    
        loss = network.loss(x_batch, t_batch)
        train_loss_list.append(loss)
    
    
    print(train_loss_list)
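
    Printing the raw list is hard to read; plotting it shows whether the loss actually trends downward over the iterations (a minimal sketch, assuming matplotlib is installed):

    import matplotlib.pyplot as plt

    plt.plot(np.arange(len(train_loss_list)), train_loss_list)
    plt.xlabel("iteration")
    plt.ylabel("loss")
    plt.show()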
    
