美文网首页深度学习笔记
L1W4多层神经网络--深度学习笔记

L1W4多层神经网络--深度学习笔记

作者: Sunooo | 来源:发表于2020-06-28 19:31 被阅读0次

    如何使用多层神经网络来进行预测?
    在使用jupyter notebook做完两道编程题之后,实在厌倦了没有提示的环境下代码,也曾尝试过安装插件,但是效果不理想。
    所以下载了pyCharm 2020版,30天的免费也足够当下用了。

    多层神经网络的步骤与单层神经网络的思路和步骤大同小异,具体的就是公式不一样。

    个人总结的构建神经网络的步骤
    1.定义神经网络结构,确定输入项,输出项和隐藏层
    2.构造初始化函数,得到初始W和b参数
    3.构造正向传播函数,选择合适的激活函数,例如sigmoid, tanh 等
    4.构造成本函数,计算损失J
    5.构造反向传播函数,得到参数的梯度
    6.构造优化函数,设置学习率,对参数进行更新
    7.合成神经网络模型函数,输出结果为参数
    8.构造预测函数,根据神经网络模型获得的参数机型预测分析,得到准确率

    python编写代码的时候,一定要注意左侧对齐,例如for循环就是按照左侧对齐来判断for的作用域的,如果对齐错了,就会产生bug。
    然后使用plt绘图的时候,要使用.show()才能输出图片。

    在开始之前需要先准备测试数据文件和工具类
    链接: https://pan.baidu.com/s/1tb-QT1AamPB8N4XMSVLPnQ 密码: m8pi
    如果缺少相应库的话,也需要安装,如果做了前面两道题,应该都已经安装完毕了。

    下面就上代码了

    import numpy as np
    from testCases import linear_forward_test_case, linear_activation_forward_test_case, L_model_forward_test_case, \
        linear_backward_test_case, linear_activation_backward_test_case, update_parameters_test_case, L_model_backward_test_case
    import h5py
    import matplotlib.pyplot as plt
    from dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward
    import lr_utils
    
    np.random.seed(1)
    
    # 初始化模型参数, 两层网络
    def initialize_parameters(n_x, n_h, n_y):
        print(n_h)
        W1 = np.random.randn(n_h, n_x) * 0.01
        b1 = np.zeros((n_h, 1))
        W2 = np.random.randn(n_y, n_h) * 0.01
        b2 = np.zeros((n_y, 1))
    
        assert (W1.shape == (n_h, n_x))
        assert (b1.shape == (n_h, 1))
        assert (W2.shape == (n_y, n_h))
        assert (b2.shape == (n_y, 1))
    
        parameters = {
            "W1": W1,
            "b1": b1,
            "W2": W2,
            "b2": b2,
                }
    
        return parameters
    
    
    # 初始化多层神经网络,L层, layers_dims 为包含网络中每个图层的节点数量的列表
    def initialize_parameters_deep(layers_dims):
        np.random.seed(3)
        parameters = {}
        L = len(layers_dims)
    
        for l in range(1, L):
            parameters["W" + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) / np.sqrt(layers_dims[l - 1])
            parameters["b" + str(l)] = np.zeros((layers_dims[l], 1))
            assert (parameters["W" + str(l)].shape == (layers_dims[l], layers_dims[l - 1]))
            assert (parameters["b" + str(l)].shape == (layers_dims[l], 1))
    
        return parameters
    
    
    # 线性正向传播
    def linear_forward(A, W, b):
    
    
        Z = np.dot(W, A) + b
        assert(Z.shape == (W.shape[0], A.shape[1]))
        cache = (A, W, b)
        return Z, cache
    
    
    #正向传播+激活函数,包括sigmoid和relu两种激活函数
    def linear_activation_forward(A_prev, W, b, activation):
    
        if activation == "sigmoid":
            Z, linear_cache = linear_forward(A_prev, W, b)
            A, activation_cache = sigmoid(Z)
        elif activation == "relu":
            Z, linear_cache = linear_forward(A_prev, W, b)
            A, activation_cache = relu(Z)
    
        assert (A.shape == (W.shape[0], A_prev.shape[1]))
        cache = (linear_cache, activation_cache)
    
        return A, cache
    
    
    #正向传播+L层+激活函数,L-1层都是relu,L层是sigmoid
    def L_model_forward(X, parameters):
        caches = []
        A = X
        L = len(parameters) // 2
    
    
        for l in range(1, L):
            A_prev = A
            A, cache = linear_activation_forward(A_prev, parameters["W" + str(l)], parameters["b" + str(l)], activation="relu")
    
            caches.append(cache)
    
        AL, cache = linear_activation_forward(A, parameters["W" + str(L)], parameters["b" + str(L)], activation="sigmoid")
        caches.append(cache)
    
        assert (AL.shape == (1, X.shape[1]))
    
        return AL, caches
    
    #构造成本函数,计算损失
    def compute_cost(AL, Y):
        m = Y.shape[1]
        cost = -np.sum(np.multiply(np.log(AL), Y) + np.multiply(np.log(1 - AL), 1 - Y)) / m
    
        cost = np.squeeze(cost)
        assert (cost.shape == ())
    
        return cost
    
    #线性反向传播
    def linear_backward(dZ, cache):
        A_prev, W, b = cache
        m = A_prev.shape[1]
    
        dW = 1 / m * np.dot(dZ, A_prev.T)
        db = 1 / m * np.sum(dZ, axis=1, keepdims=True)
        dA_prev = np.dot(W.T, dZ)
    
    
        assert (dA_prev.shape == A_prev.shape)
        assert (dW.shape == W.shape)
        assert (db.shape == b.shape)
    
        return dA_prev, dW, db
    
    #反向传播+激活函数,可选relu和sigmoid
    def linear_activation_backward(dA, cache, activation):
    
        linear_cache, activation_cache = cache
    
        if activation == "relu":
    
            dZ = relu_backward(dA, activation_cache)
            dA_prev, dW, db = linear_backward(dZ, linear_cache)
    
    
        elif activation == "sigmoid":
    
            dZ = sigmoid_backward(dA, activation_cache)
            dA_prev, dW, db = linear_backward(dZ, linear_cache)
    
        return dA_prev, dW, db
    
    #反向传播+L层+激活函数,L层的激活函数是sigmoid,其他L-1层的激活函数是relu
    def L_model_backward(AL, Y, caches):
    
        grads = {}
        L = len(caches)
        m = AL.shape[1]
        Y = Y.reshape(AL.shape)
    
        #用损失函数的导数求dAL
        dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    
        current_cache = caches[L - 1]
        grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache,
                                                                                                      activation="sigmoid")
    
    
        for l in reversed(range(L - 1)):
            current_cache = caches[l]
            dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)], current_cache,
                                                                        activation="relu")
            grads["dA" + str(l + 1)] = dA_prev_temp
            grads["dW" + str(l + 1)] = dW_temp
            grads["db" + str(l + 1)] = db_temp
    
    
        return grads
    
    #优化函数,设置学习率,对参数进行更新
    def update_parameters(parameters, grads, learning_rate):
    
        L = len(parameters) // 2
    
        for l in range(L):
            parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * grads["dW" + str(l + 1)]
            parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * grads["db" + str(l + 1)]
    
    
        return parameters
    
    
    #搭建两层神经网络
    def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost = False, isPlot = True):
        np.random.seed(1)
        grads = {}
        costs = []
        (n_x, n_h, n_y) = layers_dims
    
        parameters = initialize_parameters(n_x, n_h, n_y)
    
        W1 = parameters["W1"]
        b1 = parameters["b1"]
        W2 = parameters["W2"]
        b2 = parameters["b2"]
    
    
        for i in range(0, num_iterations):
            A1, cache1 = linear_activation_forward(X, W1, b1, "relu")
            A2, cache2 = linear_activation_forward(A1, W2, b2, "sigmoid")
    
            cost = compute_cost(A2, Y)
    
            dA2 = -(np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
            dA1, dW2, db2 = linear_activation_backward(dA2, cache2, "sigmoid")
            dA0, dW1, db1 = linear_activation_backward(dA1, cache1, "relu")
    
            grads["dW1"] = dW1
            grads["db1"] = db1
            grads["dW2"] = dW2
            grads["db2"] = db2
    
    
    
            parameters = update_parameters(parameters, grads, learning_rate)
    
            W1 = parameters["W1"]
            b1 = parameters["b1"]
            W2 = parameters["W2"]
            b2 = parameters["b2"]
    
            if i % 100 == 0 :
                costs.append(cost)
    
                if print_cost:
                    print("第", i, "次迭代,成本值为: ", np.squeeze(cost))
    
        if isPlot:
            plt.plot(np.squeeze(costs))
            plt.ylabel("cost")
            plt.xlabel("iterations (per tens)")
            plt.title("Learning rate = " + str(learning_rate))
    
            plt.show()
    
            return parameters
    
    '''
    #测试两层神经网络 
    train_set_x_orig , train_set_y , test_set_x_orig , test_set_y , classes = lr_utils.load_dataset()
    
    train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
    test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
    
    train_x = train_x_flatten / 255
    train_y = train_set_y
    test_x = test_x_flatten / 255
    test_y = test_set_y
    
    n_x = 12288
    n_h = 7
    n_y = 1
    layers_dims = (n_x,n_h,n_y)
    print(train_x)
    print(train_set_y)
    parameters = two_layer_model(train_x, train_set_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True,isPlot=True)
    '''
    # 构建预测函数
    def predict(X, Y, parameters):
    
        m = X.shape[1]
        n = len(parameters) // 2
        p = np.zeros((1, m))
    
        probas, caches = L_model_forward(X, parameters)
    
        for i in range(0, probas.shape[1]):
            if probas[0, i] > 0.5:
                p[0, i] = 1
            else:
                p[0, i] = 0
    
        print("准确度为: " + str(float(np.sum((p == Y)) / m)))
    
        return p
    #测试两层神经网络
    #predictions_train = predict(train_x, train_y, parameters)
    #predictions_test = predict(test_x, test_y, parameters)
    
    
    #搭建多层神经网络
    def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False, isPlot=True):
    
        np.random.seed(1)
        costs = []
    
        parameters = initialize_parameters_deep(layers_dims)
    
        for i in range(0, num_iterations):
            AL, caches = L_model_forward(X, parameters)
            cost = compute_cost(AL, Y)
            grads = L_model_backward(AL, Y, caches)
            parameters = update_parameters(parameters, grads, learning_rate)
    
    
            if i % 100 == 0:
    
                costs.append(cost)
    
                if print_cost:
                    print("第", i, "次迭代,成本值为:", np.squeeze(cost))
    
        if isPlot:
            plt.plot(np.squeeze(costs))
            plt.ylabel('cost')
            plt.xlabel('iterations (per tens)')
            plt.title("Learning rate =" + str(learning_rate))
            plt.show()
        return parameters
    
    
    #测试多层神经网络
    
    train_set_x_orig , train_set_y , test_set_x_orig , test_set_y , classes = lr_utils.load_dataset()
    
    train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
    test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
    
    train_x = train_x_flatten / 255
    train_y = train_set_y
    test_x = test_x_flatten / 255
    test_y = test_set_y
    
    layers_dims = [12288, 20, 7, 5, 1]
    parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True,isPlot=False)
    
    pred_train = predict(train_x, train_y, parameters)
    pred_test = predict(test_x, test_y, parameters)
    
    
    
    '''
    L1W2的编程题,结果正确率为0.70
    二层网络,结果正确率为0.72
    多层网络,结果正确率为0.78
    说明添加神经网络层数,可以提高预测的正确率
    
    '''
    #查看谁在L层被错误标记
    def print_mislabeled_images(classes, X, Y, p):
    
        a = p + Y
        mislabeled_indices = np.asarray(np.where(a == 1))
        plt.rcParams['figure.figsize'] = (40.0, 40.0)
        num_images = len(mislabeled_indices[0])
        for i in range(num_images):
            index = mislabeled_indices[1][i]
    
            plt.subplot(2, num_images, i + 1)
            plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')
            plt.axis('off')
            plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[Y[0,index]].decode("utf-8"))
        plt.show()
    
        return 0
    
    
    
    print_mislabeled_images(classes, test_x, test_y, pred_test)
    
    
    

    代码编写参考了以下两位博主的文章,在此感谢他们的无私奉献。
    https://blog.csdn.net/u013733326/article/details/79767169
    https://www.kesci.com/home/project/5dd798fbf41512002ceb38de

    相关文章

      网友评论

        本文标题:L1W4多层神经网络--深度学习笔记

        本文链接:https://www.haomeiwen.com/subject/gftgfktx.html