Stochastic Gradient Descent

Author: 阿发贝塔伽马 | Published 2017-07-02 23:25
Compared with the previous post, the cost function picks up an extra factor of m in the denominator:

J(\mathbf{w}) = \frac{1}{2m}\sum_{i}\bigl(y^{(i)} - \phi(z^{(i)})\bigr)^{2}

Batch gradient descent works the same way as in the previous post, so here we turn to stochastic gradient descent, which updates all of the weights w from a single sample at a time, much as in note one.
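For a single sample, differentiating \tfrac{1}{2}\bigl(y^{(i)} - \phi(z^{(i)})\bigr)^{2} with respect to the weights (with the identity activation, \phi(z) = z = \mathbf{w}^{T}\mathbf{x} + w_0) gives the per-sample update that the training() method below implements:

\Delta w_0 = \eta\,\bigl(y^{(i)} - \phi(z^{(i)})\bigr), \qquad \Delta w_j = \eta\,\bigl(y^{(i)} - \phi(z^{(i)})\bigr)\,x_j^{(i)}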
    
    import pandas as pd
    import numpy as np
    import matplotlib
    import matplotlib.pyplot as plt
    
    # first 100 rows of the Iris data set: 50 Iris-setosa followed by 50 Iris-versicolor
    df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
    y = df.iloc[0:100, 4].values
    y = np.where(y=='Iris-setosa', 1, -1)   # encode the class labels as +1 / -1
    x = df.iloc[0:100, [0,2]].values        # two features: sepal length and petal length
    
    # standardize each feature to zero mean and unit variance
    x_std = np.copy(x)
    x_std[:, 0] = (x[:,0]-x[:,0].mean())/x[:,0].std()
    x_std[:, 1] = (x[:,1]-x[:,1].mean())/x[:,1].std()
    
    class Perceptron():
        """Linear neuron (Adaline-style) trained with stochastic gradient descent."""
        def __init__(self, eta, X, Y, N):
            self.eta = eta                  # learning rate
            self.X = X                      # training samples
            self.Y = Y                      # target labels (+1 / -1)
            self.N = N                      # number of epochs
            self.w = np.zeros(len(X[0]))    # weights as an ndarray so += updates element-wise
            self.w0 = 0.0                   # bias term
            self.m = len(X)                 # number of samples
            self.n = len(X[0])              # number of features
        def _shuffle(self, X, y):
            """Shuffle training data"""
            r = np.random.permutation(len(y))
            return X[r], y[r]
        def output_y(self, x):
            # linear activation: z = w . x + w0
            z = np.dot(x, self.w) + self.w0
            return z
        def training(self):
            self.errors = []
            for times in range(self.N):
                error = 0
                # reshuffle the data at the start of every epoch
                self.X, self.Y = self._shuffle(self.X, self.Y)
                for i in range(self.m):
                    # update the weights from a single sample
                    delta_y = self.Y[i] - self.output_y(self.X[i])
                    error += 0.5*delta_y**2
                    self.w0 += self.eta*delta_y
                    self.w += self.eta*delta_y*self.X[i]
                # record the average cost for this epoch
                self.errors.append(error/self.m)
                
    per = Perceptron(0.01, x_std, y, 16)
    
    per.training()
    
    print(per.w0, per.w)
    
    def f(x, y):
        # decision function on the grid: sign(w0 + w[0]*x + w[1]*y)
        z = per.w0 + per.w[0]*x + per.w[1]*y
        z = np.where(z > 0, 1, -1)
        return z
    
    n = 200
    
    mx = np.linspace(-3, 3, n)
    my = np.linspace(-2, 2, n)
    # build a grid over the standardized feature space
    X, Y = np.meshgrid(mx, my)
    fig, axes = plt.subplots(1,2)
    axes0, axes1 = axes.flatten()
    axes0.plot(per.errors, marker='o')
    axes0.set_title('errors')
    axes1.contourf(X, Y, f(X, Y), 2, alpha = 0.75, cmap = plt.cm.RdBu)
    axes1.scatter(x_std[:,0][0:50], x_std[:, 1][0:50],s=80,edgecolors='r', marker='o')
    axes1.scatter(x_std[:,0][50:100], x_std[:, 1][50:100], marker='x', color='g')
    # annotation coordinates are in standardized feature units (approximate cluster centres)
    axes1.annotate(r'versicolor', xy=(0.8, 1.0), xytext=(1.8, 1.6), arrowprops=dict(arrowstyle='->', facecolor='blue'))
    axes1.annotate(r'setosa', xy=(-0.8, -1.0), xytext=(-2.5, -1.6), arrowprops=dict(arrowstyle='->', facecolor='blue'))
    fig.set_size_inches(12.5, 7.5)
    
    plt.subplots_adjust(left=0.01, right= 0.9, bottom=0.1, top=0.5)
    plt.show()
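
As a rough cross-check, the same one-sample-at-a-time training loop can be reproduced with scikit-learn's SGDClassifier and its partial_fit method. This is only a sketch for comparison, not the code above: SGDClassifier does not expose the exact squared-error / identity-activation objective used in the Perceptron class, so the perceptron loss is used here as a stand-in, with the same constant learning rate of 0.01 and 16 epochs.

    # sketch: per-sample SGD via scikit-learn (perceptron loss standing in for
    # the squared-error objective used above)
    from sklearn.linear_model import SGDClassifier

    clf = SGDClassifier(loss='perceptron', learning_rate='constant', eta0=0.01,
                        random_state=0)
    classes = np.unique(y)
    for epoch in range(16):
        r = np.random.permutation(len(y))     # reshuffle each epoch, like _shuffle()
        for i in r:
            clf.partial_fit(x_std[i:i+1], y[i:i+1], classes=classes)

    print(clf.intercept_, clf.coef_)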
    

