
Introduction to neural networks

Author: echo_ye4 | Published 2019-12-20 14:11

Overview

sigmoid
softmax
neural network training

sigmoid (logistic regression)

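The figures that originally appeared here are missing from this copy; they presumably showed the logistic-regression model and its gradients. In any case, the formulas that the code below implements are the standard ones:

z = w^T x + b, \quad a = \sigma(z) = \frac{1}{1 + e^{-z}}

L(a, y) = -\big(y \log a + (1 - y) \log(1 - a)\big)

dz = a - y, \quad dw = \frac{1}{m}\, x\, dz^T, \quad db = \frac{1}{m} \sum dz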
import numpy as np
from numpy.linalg import cholesky
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

sampleNo = 100
mu = np.array([[2, 3]])
Sigma = np.array([[1, 1.5], [1.5, 3]])  # covariance must be symmetric positive-definite
R = cholesky(Sigma)
s = np.dot(np.random.randn(sampleNo, 2), R) + mu

mu2 = np.array([[7, 6]])
t = np.dot(np.random.randn(sampleNo, 2), R) + mu2

plt.plot(s[:,0],s[:,1],'+')
plt.plot(t[:,0],t[:,1],'*')
plt.show()
(Figure: scatter plot of the two sample clusters)
# construct the training data
x = np.concatenate((s, t)).T
y1 = np.zeros(100).reshape(1,100)
y2 = np.ones(100).reshape(1,100)
y = np.concatenate((y1, y2), axis=1)
# initialize the parameters
w = np.random.randn(2) / 2
b = np.random.randn(1)

def logistic(z):
    return 1 / (1 + np.exp(-z))

def loss(a, y):
    return -(y * np.log(a) + (1-y) * np.log(1-a))

# train the parameters with gradient descent
iter = 1
m = 200
alpha = 0.1
while iter < 5000:
    z = np.dot(w.T, x) + b
    a = logistic(z)
    #l = np.sum(loss(a, y)) / m
    #print(iter, l)
    dz = a - y
    dw = np.sum(np.dot(x, dz.T), axis=1) / m
    db = np.sum(dz) / m
    w = w - alpha * dw
    b = b - alpha * db
    iter += 1

# visualize the result
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(x[0], x[1], y)
x1_tmp = x2_tmp = np.linspace(-5, 10, 100)
x1_tmp, x2_tmp = np.meshgrid(x1_tmp, x2_tmp)
x_tmp = np.concatenate((x1_tmp.reshape(1, 10000), x2_tmp.reshape(1, 10000)))
z_tmp = np.dot(w.T, x_tmp) + b
a_tmp = logistic(z_tmp)
y_predicted = a_tmp.reshape(100, 100)
ax.plot_surface(x1_tmp, x2_tmp, y_predicted)
ax.view_init(elev=15,azim=-5)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.show()
(Figure: 3D plot of the data points and the fitted sigmoid surface)
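As a quick sanity check (not part of the original post), one can measure the training accuracy with the learned parameters; a minimal sketch, assuming the x, y, w, b defined above:

# sketch: training accuracy of the learned logistic-regression parameters
z = np.dot(w.T, x) + b
a = logistic(z)
pred = (a > 0.5).astype(float)   # threshold the probabilities at 0.5
print('training accuracy:', np.mean(pred == y))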

softmax

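The figures that originally appeared here are missing; they presumably showed the softmax model. The formulas that the code below implements are:

a_k = \mathrm{softmax}(z)_k = \frac{e^{z_k}}{\sum_j e^{z_j}}, \quad z = W^T x + b

L = -\frac{1}{m} \sum y \log a

dz = a - y, \quad dW = \frac{1}{m}\, x\, dz^T, \quad db = \frac{1}{m} \sum dz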
import numpy as np
from numpy.linalg import cholesky
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

sampleNo = 100
mu = np.array([[2, 3]])
Sigma = np.array([[1, 1.5], [1.5, 3]])  # covariance must be symmetric positive-definite
R = cholesky(Sigma)
r = np.dot(np.random.randn(sampleNo, 2), R) + mu

mu2 = np.array([[6, 6]])
s = np.dot(np.random.randn(sampleNo, 2), R) + mu2

mu3 = np.array([[1, 6]])
t = np.dot(np.random.randn(sampleNo, 2), R) + mu3

mu4 = np.array([[9, 4]])
v = np.dot(np.random.randn(sampleNo, 2), R) + mu4

plt.plot(r[:,0],r[:,1],'+')
plt.plot(s[:,0],s[:,1],'*')
plt.plot(t[:,0],t[:,1],'.')
plt.plot(v[:,0],v[:,1],'o')
plt.show()
(Figure: scatter plot of the four sample clusters)
# construct the training data
x = np.concatenate((r,s,t,v))
x = x.T
y_tmp1 = np.array([1,0,0,0]).reshape(4,1)
y_tmp2 = np.array([0,1,0,0]).reshape(4,1)
y_tmp3 = np.array([0,0,1,0]).reshape(4,1)
y_tmp4 = np.array([0,0,0,1]).reshape(4,1)
ones = np.ones(100).reshape(1, 100)
y1 = np.dot(y_tmp1, ones)
y2 = np.dot(y_tmp2, ones)
y3 = np.dot(y_tmp3, ones)
y4 = np.dot(y_tmp4, ones)
y = np.concatenate((y1, y2, y3, y4), axis=1)
# initialize the parameters
w = np.random.randn(8) / 8
w = w.reshape(2,4)
b = np.random.randn(4) / 4
b = b.reshape(1,4)

def softmax(z):
    z_exp = np.exp(z - np.max(z, axis=0))  # subtract the column max for numerical stability
    z_sum = np.sum(z_exp, axis=0)
    return z_exp / z_sum

#gradient descent
m = 400
alpha = 0.1
iter = 1
while iter < 5000:
    z = np.dot(w.T, x) + b.T
    a = softmax(z)
    l = -np.sum(y * np.log(a)) / m
    if iter % 500 == 0:
        print(iter, l)
    dz = a - y
    dw = np.dot(x, dz.T) / m
    db = np.sum(dz.T, axis=0) / m
    w = w - alpha * dw
    b = b - alpha * db
    iter += 1

def predict(x):
    z = np.dot(w.T, x) + b.T
    a = softmax(z)
    return np.argmax(a, axis=0)

# visualize the result
x1 = x2 = np.linspace(-2, 15, 100)
x1, x2 = np.meshgrid(x1, x2)
x1 = x1.reshape(1, 10000)
x2 = x2.reshape(1, 10000)
x_tmp = np.concatenate((x1, x2))
y_tmp = predict(x_tmp)
x_0 = x_tmp[:, y_tmp==0]
plt.scatter(x_0[0], x_0[1], alpha=0.1)
x_1 = x_tmp[:, y_tmp==1]
plt.scatter(x_1[0], x_1[1], alpha=0.1)
x_2 = x_tmp[:, y_tmp==2]
plt.scatter(x_2[0], x_2[1], alpha=0.1)
x_3 = x_tmp[:, y_tmp==3]
plt.scatter(x_3[0], x_3[1], alpha=0.1)

plt.plot(r[:,0],r[:,1],'+', alpha=1)
plt.plot(s[:,0],s[:,1],'*', alpha=1)
plt.plot(t[:,0],t[:,1],'.', alpha=1)
plt.plot(v[:,0],v[:,1],'o', alpha=1)
plt.show()
(Figure: decision regions of the softmax classifier overlaid on the training clusters)
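A similar sanity check (not in the original post) for the softmax classifier, comparing the predicted class indices against the one-hot labels y:

# sketch: training accuracy of the softmax classifier
labels = np.argmax(y, axis=0)    # true class index per sample
print('training accuracy:', np.mean(predict(x) == labels))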

neural network

Consider a three-layer network: 3 input nodes, 4 nodes in the first layer, 2 nodes in the second layer, and 1 node in the third (output) layer, with sigmoid as the activation in every layer.


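The figures that originally appeared here are missing; they presumably showed the network diagram and the backpropagation equations. For layers l = 1, 2, 3, all with sigmoid activations, the code below implements:

z^{[l]} = W^{[l]T} a^{[l-1]} + b^{[l]}, \quad a^{[l]} = \sigma(z^{[l]}), \quad a^{[0]} = x

dz^{[3]} = a^{[3]} - y, \quad dz^{[l]} = \big(W^{[l+1]} dz^{[l+1]}\big) \odot a^{[l]} (1 - a^{[l]})

dW^{[l]} = \frac{1}{m}\, a^{[l-1]} dz^{[l]T}, \quad db^{[l]} = \frac{1}{m} \sum dz^{[l]}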
# construct the training data
x = np.random.randn(18).reshape(3, 6)
y1 = np.ones(3).reshape(1,3)
y2 = np.zeros(3).reshape(1,3)
y = np.concatenate((y1, y2), axis=1)
# initialize the parameters
w1 = np.random.randn(12) / 12
w1 = w1.reshape(3,4)
b1 = np.random.randn(4).reshape(1, 4)
w2 = np.random.randn(8) / 8
w2 = w2.reshape(4,2)
b2 = np.random.randn(2).reshape(1, 2)
w3 = np.random.randn(2) / 2
w3 = w3.reshape(2,1)
b3 = np.random.randn(1).reshape(1, 1)

#gradient descent
iter = 1
m = 6
alpha = 0.1
while iter < 100:
    z1 = np.dot(w1.T, x) + b1.T
    a1 = logistic(z1)
    
    z2 = np.dot(w2.T, a1) + b2.T
    a2 = logistic(z2)
    
    z3 = np.dot(w3.T, a2) + b3.T
    a3 = logistic(z3)
        
    dz3 = a3 - y
    dw3 = np.dot(a2, dz3.T) / m
    db3 = np.sum(dz3.T, axis=0) / m
    
    dz2 = np.dot(w3, dz3) * a2 * (1 - a2)  # sigmoid derivative uses the activation a2, not z2
    dw2 = np.dot(a1, dz2.T) / m
    db2 = np.sum(dz2.T, axis=0) / m
    
    dz1 = np.dot(w2, dz2) * a1 * (1 - a1)  # sigmoid derivative uses the activation a1, not z1
    dw1 = np.dot(x, dz1.T) / m
    db1 = np.sum(dz1.T, axis=0) / m
    
    w3 = w3 - alpha * dw3
    b3 = b3 - alpha * db3
    w2 = w2 - alpha * dw2
    b2 = b2 - alpha * db2
    w1 = w1 - alpha * dw1
    b1 = b1 - alpha * db1
    
    iter += 1
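
To see what training achieved (a small sketch, not in the original post), one can run a final forward pass with the trained weights, reusing the logistic and loss functions defined earlier:

# sketch: forward pass with the trained weights
a1 = logistic(np.dot(w1.T, x) + b1.T)
a2 = logistic(np.dot(w2.T, a1) + b2.T)
a3 = logistic(np.dot(w3.T, a2) + b3.T)
print('final loss:', np.sum(loss(a3, y)) / m)
print('predictions:', (a3 > 0.5).astype(int))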

Common activation functions and their derivatives (a small code sketch follows the list):

sigmoid: y = \frac{1}{1 + e^{-x}}, \; y' = y(1 - y); range (0, 1), commonly used for binary classification.

tanh: y = \frac{e^x - e^{-x}}{e^x + e^{-x}}, \; y' = 1 - y^2; an improved variant of sigmoid with range (-1, 1), commonly used in hidden layers because its outputs are zero-centered.

relu: y = \max(0, x), \; y' = \begin{cases} 1 & x \ge 0 \\ 0 & \text{otherwise} \end{cases}; passes positive inputs through unchanged and is the default activation for hidden layers.
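
A minimal NumPy sketch of these activations and their derivatives (the derivative helpers for sigmoid and tanh take the activation output y, matching the formulas above):

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def d_sigmoid(y):                 # derivative expressed via the output y
    return y * (1 - y)

def tanh(x):
    return np.tanh(x)

def d_tanh(y):                    # derivative expressed via the output y
    return 1 - y ** 2

def relu(x):
    return np.maximum(0, x)

def d_relu(x):                    # derivative expressed via the input x
    return (x >= 0).astype(float)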
