
Neural Networks

Author: llsh2010 | Published 2018-02-06 23:40

    Neural Network Models

    1. The Neuron Model

    Common neuron activation functions:

    import math
    import numpy as np
    import matplotlib.pyplot as plt

    # Step function
    def llsh_sgn(x):
        if x >= 0:
            return 1
        else:
            return 0

    # Sigmoid function
    def llsh_sigmoid(x):
        return 1/(1 + math.exp(-x))

    # Plot the sigmoid over [-100, 100)
    x = np.arange(-100, 100, 0.1)
    y = [llsh_sigmoid(index) for index in x]

    fig, ax = plt.subplots()
    ax.scatter(x, y, s = 0.6)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    plt.show()
    

    Perceptrons and Multilayer Networks

    The error backpropagation (BP) algorithm, demonstrated by the reference and the worked example below:

    http://python.jobbole.com/82758/

    
    import matplotlib.pyplot as plt
    import numpy as np

    # Step function
    def llsh_sgn(x):
        if x >= 0:
            return 1
        else:
            return 0

    # Sigmoid function (np.exp makes it work on arrays, too)
    def llsh_sigmoid(x):
        return 1/(1 + np.exp(-x))

    # Derivative of the sigmoid, written in terms of the sigmoid's
    # output s: sigmoid'(x) = s * (1 - s)
    def llsh_sigmoid_div(s):
        return s * (1 - s)

    # Single-layer neuron training example.
    # Each row is one sample: two features plus a constant bias input of 1.
    X = np.array([ [0.9,0,1],
                   [0,1,1],
                   [1,0,1],
                   [0.1,1,1] ])
    print("Input", X[:,0:2])

    # Scatter plot of the two input features
    fig, ax = plt.subplots()
    ax.scatter(X[:,0], X[:,1], s = 10)
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    plt.show()

    y = np.array([[0,0,1,1]]).T
    print("\nOutput", y)

    # Initialize the weights randomly in [-1, 1)
    np.random.seed(1)
    syn0 = 2*np.random.random((3,1)) - 1
    print(syn0)

    for j in range(1000):
        l0 = X
        # Forward pass: compute the output
        l1 = llsh_sigmoid(np.dot(l0, syn0))
        # Error and gradient update
        l1_error = y - l1
        l1_delta = l1_error * llsh_sigmoid_div(l1)
        syn0 += np.dot(l0.T, l1_delta)

    print("Output After Training:")
    print(l1)
    print(syn0)


    # Multilayer neural network (learns XOR)
    X = np.array([ [0,0,1],[0,1,1],[1,0,1],[1,1,1] ])
    y = np.array([[0,1,1,0]]).T
    syn0 = 2*np.random.random((3,4)) - 1
    syn1 = 2*np.random.random((4,1)) - 1

    for j in range(6000):
        l0 = X
        # Forward pass through both layers
        l1 = llsh_sigmoid(np.dot(l0, syn0))
        l2 = llsh_sigmoid(np.dot(l1, syn1))
        # Backpropagate the error layer by layer (gradient descent)
        l2_delta = (y - l2) * llsh_sigmoid_div(l2)
        l1_delta = l2_delta.dot(syn1.T) * llsh_sigmoid_div(l1)
        syn1 += l1.T.dot(l2_delta)
        syn0 += l0.T.dot(l1_delta)

    print("Output After Training:")
    print(l2)


    Global Minima and Local Minima

    Gradient descent can easily get stuck in a local minimum. The following strategies can help escape one:

    • Restart training from several different initial values (see the sketch after this list)
    • Simulated annealing
    • Stochastic gradient descent

    These strategies are heuristic, however, and come with no theoretical guarantees.
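
    As a minimal sketch of the first strategy, the snippet below reruns the two-layer XOR training from above under several different random seeds and keeps the run with the lowest final error. The helper train_xor and the mean-absolute-error metric are illustrative choices of mine, not part of the original example.

    import numpy as np

    def llsh_sigmoid(x):
        return 1/(1 + np.exp(-x))

    def llsh_sigmoid_div(s):
        return s * (1 - s)

    X = np.array([ [0,0,1],[0,1,1],[1,0,1],[1,1,1] ])
    y = np.array([[0,1,1,0]]).T

    # Hypothetical helper: train the two-layer XOR network once from the
    # given seed and report the final mean absolute error and weights.
    def train_xor(seed, iters=6000):
        rng = np.random.RandomState(seed)
        syn0 = 2*rng.random_sample((3,4)) - 1
        syn1 = 2*rng.random_sample((4,1)) - 1
        for _ in range(iters):
            l1 = llsh_sigmoid(np.dot(X, syn0))
            l2 = llsh_sigmoid(np.dot(l1, syn1))
            l2_delta = (y - l2) * llsh_sigmoid_div(l2)
            l1_delta = l2_delta.dot(syn1.T) * llsh_sigmoid_div(l1)
            syn1 += l1.T.dot(l2_delta)
            syn0 += X.T.dot(l1_delta)
        return np.mean(np.abs(y - l2)), syn0, syn1

    # Restart from five different initial weight settings and keep the
    # run whose final training error is smallest.
    best = min((train_xor(seed) for seed in range(5)), key=lambda r: r[0])
    print("Best final error over 5 restarts:", best[0])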

    Other Common Neural Networks

    • RBF (radial basis function) networks (a minimal sketch follows this list)
    • ART (adaptive resonance theory) networks
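
    As a rough, self-contained sketch of what an RBF network looks like (this example is mine, not from the original post): Gaussian hidden units with hand-picked centers and width, and output weights fitted by linear least squares. One simple variant fixes the centers rather than learning them.

    import numpy as np

    # Gaussian RBF layer: one activation per (sample, center) pair
    def rbf_features(X, centers, width=1.0):
        d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
        return np.exp(-d2 / (2 * width ** 2))

    # Fit y = sin(x) on [0, 2*pi] with 10 fixed, evenly spaced centers
    X = np.linspace(0, 2*np.pi, 50)[:, None]
    y = np.sin(X[:, 0])
    centers = np.linspace(0, 2*np.pi, 10)[:, None]

    # Output weights by least squares on the hidden-layer activations
    Phi = rbf_features(X, centers)
    w, *_ = np.linalg.lstsq(Phi, y, rcond=None)
    print("Max fit error:", np.max(np.abs(Phi @ w - y)))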
