Logistic Regression: Theory and a Python Implementation

Author: 牛顿学计算机 | Published 2018-09-26 09:12

    Preface

    Logistic regression is a classic classification algorithm from statistical learning, commonly used for binary classification.

    Logistic regression has the following characteristics:

    Pros: low computational cost; easy to understand and implement.

    Cons: prone to underfitting; classification accuracy may be low.

    Applicable data types: numeric and nominal data.

    Mathematical Derivation of the Binomial Logistic Regression Model

    Let (x, y) denote an input sample, where y = 1 marks the positive class and y = 0 the negative class. The two class probabilities are modeled as:

        P(y = 1 | x) = exp(w · x + b) / (1 + exp(w · x + b))
        P(y = 0 | x) = 1 / (1 + exp(w · x + b))

    where w is the weight vector and b is the bias.
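    As a quick numeric illustration, here is a minimal sketch of evaluating these two probabilities (the values of w, b, and x below are made up, not taken from the article's data):

    import numpy as np

    w = np.array([0.5, -0.2])   # hypothetical weight vector
    b = 0.1                     # hypothetical bias
    x = np.array([1.0, 2.0])    # hypothetical input sample
    z = np.dot(w, x) + b
    p1 = np.exp(z) / (1 + np.exp(z))   # P(y = 1 | x)
    p0 = 1 / (1 + np.exp(z))           # P(y = 0 | x)
    print(p1 + p0)                     # the two probabilities sum to 1.0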

    Model Parameter Estimation

    Logistic regression is usually fit by maximum likelihood estimation, which yields the regression model.


    Writing p(x) = P(y = 1 | x) and absorbing the bias b into w by appending a constant feature x0 = 1, the likelihood of the training set is

        prod_i p(x_i)^y_i * (1 - p(x_i))^(1 - y_i)

    and taking logarithms gives the log-likelihood

        L(w) = sum_i [ y_i * (w · x_i) - log(1 + exp(w · x_i)) ]
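    As a cross-check of the formula, here is a minimal sketch that evaluates L(w) over a whole dataset (the function name and shapes are illustrative, not part of the article's code):

    import numpy as np

    def log_likelihood(w, X, y):
        # L(w) = sum_i [ y_i * (w . x_i) - log(1 + exp(w . x_i)) ]
        z = X.dot(w)    # w . x_i for every sample, shape (m,)
        return np.sum(y * z - np.log(1 + np.exp(z)))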

    Fitting the model thus becomes an optimization problem with the log-likelihood as the objective function. Common solvers are gradient descent (applied to the negative log-likelihood) and quasi-Newton methods; this article uses gradient ascent and stochastic gradient ascent to maximize L(w).


    The gradient of the log-likelihood is

        ∇L(w) = sum_i (y_i - p(x_i)) * x_i

    Setting this gradient to zero has no closed-form solution, so we cannot solve directly for the w that maximizes L(w); instead, gradient ascent iterates

        w := w + alpha * ∇L(w)

    Each update moves one step of size alpha along the gradient direction, until the maximum is reached.
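    To make the procedure concrete, here is a tiny self-contained sketch (a toy example, not from the article) of gradient ascent maximizing f(x) = -(x - 2)^2, whose maximum is at x = 2:

    def grad(x):
        return -2 * (x - 2)   # derivative of f(x) = -(x - 2)**2

    x = 0.0
    alpha = 0.1               # step size
    for _ in range(100):
        x = x + alpha * grad(x)   # move one step along the gradient
    print(x)                  # approaches the maximizer x = 2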

    Python implementation of the formula exp(x) / (1 + exp(x)):

    def sigmod(inx):
        return exp(inx) / (1 + exp(inx))   # numpy's exp (see the full listing), so this also works element-wise
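    One caveat: for a large positive inx, exp(inx) overflows and this expression returns nan. A numerically safer variant for scalar inputs, as used by the stochastic versions below, is an optional refinement (not in the original article); it picks whichever algebraic form keeps the exponent non-positive, assuming exp is in scope as in the full listing:

    def sigmod_stable(inx):
        if inx >= 0:
            return 1 / (1 + exp(-inx))       # exponent <= 0, no overflow
        return exp(inx) / (1 + exp(inx))     # exponent < 0, no overflow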
    

    Implementation of gradient ascent:

    #Gradient ascent
    def grad_ascent(data_matin, class_label):
        data_matrix = mat(data_matin)                 # convert the data to a 100 x 3 matrix
        label_matrix = mat(class_label).transpose()   # convert the labels to a 100 x 1 matrix
        m, n = shape(data_matrix)                     # m = 100 samples, n = 3 features
        alpha = 0.001                                 # step size
        max_cycles = 500                              # number of full-batch iterations
        weights = ones((n, 1))                        # initialize the n x 1 weight vector to all ones
        for k in range(max_cycles):
            h = sigmod(data_matrix * weights)         # 100 x 1 vector of predicted probabilities
            error = label_matrix - h                  # prediction error y - h
            weights = weights + alpha * data_matrix.transpose() * error   # w := w + alpha * X^T * (y - h)
        return weights
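
    The update line is the matrix form of the gradient from the derivation: ∇L(w) = X^T (y - h), where h = sigmoid(Xw). A quick shape check on hypothetical random data (not the article's dataset):

    import numpy as np

    X = np.mat(np.random.randn(100, 3))            # 100 samples, 3 features
    y = np.mat(np.random.randint(0, 2, (100, 1)))  # 100 x 1 labels of 0s and 1s
    w = np.mat(np.ones((3, 1)))                    # 3 x 1 weights
    h = np.exp(X * w) / (1 + np.exp(X * w))        # 100 x 1 predictions
    w = w + 0.001 * X.T * (y - h)                  # one ascent step; w stays 3 x 1
    print(w.shape)                                 # (3, 1)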
    

    Implementation of stochastic gradient ascent:

    #Stochastic gradient ascent
    def stoc_grad_ascent0(data_matrix, class_label):
        m, n = shape(data_matrix)
        alpha = 0.01                  # fixed step size
        weights = ones(n)             # 1-D weight array of length n
        for i in range(m):            # a single pass over the data: one update per sample
            h = sigmod(sum(data_matrix[i] * weights))    # scalar prediction for sample i
            error = class_label[i] - h                   # scalar error
            weights = weights + alpha * error * array(data_matrix[i])   # w := w + alpha * (y_i - h) * x_i
        return weights
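
    Compared with grad_ascent, which performs max_cycles full passes over the whole matrix, this version makes a single pass with one cheap per-sample update each step, so it scales better but converges less reliably. A usage sketch, assuming the load_data_set helper from the full listing below and the test_set.txt data file:

    data_mat, label_mat = load_data_set()
    weights = stoc_grad_ascent0(array(data_mat), label_mat)
    print(weights)   # a length-3 array: [w0, w1, w2]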
    

    Implementation of the improved stochastic gradient ascent:

    #Improved stochastic gradient ascent
    def stoc_grad_ascent1(data_matrix, class_label, num_iter=150):
        m, n = shape(data_matrix)
        weights = ones(n)
        for j in range(num_iter):
            data_index = list(range(m))   # samples not yet used in this pass
            for i in range(m):
                alpha = 4 / (1 + j + i) + 0.01   # decaying step size with a 0.01 floor
                rand_index = int(random.uniform(0, len(data_index)))   # pick a random remaining sample
                sample = data_index[rand_index]
                h = sigmod(sum(data_matrix[sample] * weights))
                error = class_label[sample] - h
                weights = weights + alpha * error * array(data_matrix[sample])
                del(data_index[rand_index])   # sample without replacement within a pass
        return weights
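
    Two things improve on stoc_grad_ascent0 here: the step size decays as training progresses (but never below the 0.01 floor, so later samples still matter), and samples are visited in random order without replacement within each pass. A small sketch of how alpha evolves:

    for j in (0, 1, 10):           # pass number
        for i in (0, 50, 99):      # update number within the pass
            alpha = 4 / (1 + j + i) + 0.01
            print(j, i, round(alpha, 3))
    # alpha falls from 4.01 toward the 0.01 floor as j + i grows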
    

    The complete Python implementation is as follows:

    from numpy import *
    import matplotlib.pyplot as plt
    import random   # the standard-library module; it shadows numpy's random from the star import above
    
    def load_data_set():
        data_mat = []
        label_mat = []
        with open("test_set.txt") as fr:      # one sample per line: x1 x2 label
            for line in fr.readlines():
                line_arr = line.strip().split()   # split the line into its three fields
                data_mat.append([1.0, float(line_arr[0]), float(line_arr[1])])  # x0 = 1 is the constant bias feature
                label_mat.append(int(line_arr[2]))    # class label
        return data_mat, label_mat   # training data and labels
    
    def sigmod(inx):
        return exp(inx) / (1 + exp(inx))   # numpy's exp, so this also works element-wise on matrices
    
    #Gradient ascent
    def grad_ascent(data_matin, class_label):
        data_matrix = mat(data_matin)                 # convert the data to a 100 x 3 matrix
        label_matrix = mat(class_label).transpose()   # convert the labels to a 100 x 1 matrix
        m, n = shape(data_matrix)                     # m = 100 samples, n = 3 features
        alpha = 0.001                                 # step size
        max_cycles = 500                              # number of full-batch iterations
        weights = ones((n, 1))                        # initialize the n x 1 weight vector to all ones
        for k in range(max_cycles):
            h = sigmod(data_matrix * weights)         # 100 x 1 vector of predicted probabilities
            error = label_matrix - h                  # prediction error y - h
            weights = weights + alpha * data_matrix.transpose() * error   # w := w + alpha * X^T * (y - h)
        return weights
    
    #Stochastic gradient ascent
    def stoc_grad_ascent0(data_matrix, class_label):
        m, n = shape(data_matrix)
        alpha = 0.01                  # fixed step size
        weights = ones(n)             # 1-D weight array of length n
        for i in range(m):            # a single pass over the data: one update per sample
            h = sigmod(sum(data_matrix[i] * weights))    # scalar prediction for sample i
            error = class_label[i] - h                   # scalar error
            weights = weights + alpha * error * array(data_matrix[i])   # w := w + alpha * (y_i - h) * x_i
        return weights
    
    #Improved stochastic gradient ascent
    def stoc_grad_ascent1(data_matrix, class_label, num_iter=150):
        m, n = shape(data_matrix)
        weights = ones(n)
        for j in range(num_iter):
            data_index = list(range(m))   # samples not yet used in this pass
            for i in range(m):
                alpha = 4 / (1 + j + i) + 0.01   # decaying step size with a 0.01 floor
                rand_index = int(random.uniform(0, len(data_index)))   # pick a random remaining sample
                sample = data_index[rand_index]
                h = sigmod(sum(data_matrix[sample] * weights))
                error = class_label[sample] - h
                weights = weights + alpha * error * array(data_matrix[sample])
                del(data_index[rand_index])   # sample without replacement within a pass
        return weights
    
    def plot_best_fit(wei):
        weights = asarray(wei).flatten()   # accepts both the matrix from grad_ascent and the 1-D arrays
        data_mat, label_mat = load_data_set()  # reload the raw data for plotting
        data_arr = array(data_mat)
        n = shape(data_arr)[0]
        xcord1 = []   # x1 coordinates of class-1 points
        ycord1 = []   # x2 coordinates of class-1 points
        xcord2 = []   # x1 coordinates of class-0 points
        ycord2 = []   # x2 coordinates of class-0 points
        for i in range(n):
            if int(label_mat[i]) == 1:
                xcord1.append(data_arr[i, 1])
                ycord1.append(data_arr[i, 2])
            else:
                xcord2.append(data_arr[i, 1])
                ycord2.append(data_arr[i, 2])
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.scatter(xcord1, ycord1, s=30, c="red", marker="s")
        ax.scatter(xcord2, ycord2, s=30, c="green")
        x = arange(-5.0, 5.0, 0.1)
        y = (-weights[0] - weights[1] * x) / weights[2]   # the decision boundary w0 + w1*x1 + w2*x2 = 0
        ax.plot(x, y)
        plt.xlabel("X1")
        plt.ylabel("X2")
        plt.show()
    
    def main():
        data_mat, label_mat = load_data_set()
        #weights = grad_ascent(data_mat, label_mat)               # batch gradient ascent
        weights = stoc_grad_ascent0(array(data_mat), label_mat)   # stochastic gradient ascent
        #weights = stoc_grad_ascent1(array(data_mat), label_mat)  # improved stochastic gradient ascent
        print(weights)
        plot_best_fit(weights)

    if __name__ == "__main__":
        main()
    

    One thing to note: grad_ascent returns an n x 1 numpy matrix, while the two stochastic versions return a 1-D array. plot_best_fit normalizes both forms with asarray(wei).flatten(), so the weights produced by any of the three algorithms can be passed to it directly; just uncomment the desired line in main().
    Input data (save it as test_set.txt; the columns are x1, x2, and the class label):

    -0.017612   14.053064   0
    -1.395634   4.662541    1
    -0.752157   6.538620    0
    -1.322371   7.152853    0
    0.423363    11.054677   0
    0.406704    7.067335    1
    0.667394    12.741452   0
    -2.460150   6.866805    1
    0.569411    9.548755    0
    -0.026632   10.427743   0
    0.850433    6.920334    1
    1.347183    13.175500   0
    1.176813    3.167020    1
    -1.781871   9.097953    0
    -0.566606   5.749003    1
    0.931635    1.589505    1
    -0.024205   6.151823    1
    -0.036453   2.690988    1
    -0.196949   0.444165    1
    1.014459    5.754399    1
    1.985298    3.230619    1
    -1.693453   -0.557540   1
    -0.576525   11.778922   0
    -0.346811   -1.678730   1
    -2.124484   2.672471    1
    1.217916    9.597015    0
    -0.733928   9.098687    0
    -3.642001   -1.618087   1
    0.315985    3.523953    1
    1.416614    9.619232    0
    -0.386323   3.989286    1
    0.556921    8.294984    1
    1.224863    11.587360   0
    -1.347803   -2.406051   1
    1.196604    4.951851    1
    0.275221    9.543647    0
    0.470575    9.332488    0
    -1.889567   9.542662    0
    -1.527893   12.150579   0
    -1.185247   11.309318   0
    -0.445678   3.297303    1
    1.042222    6.105155    1
    -0.618787   10.320986   0
    1.152083    0.548467    1
    0.828534    2.676045    1
    -1.237728   10.549033   0
    -0.683565   -2.166125   1
    0.229456    5.921938    1
    -0.959885   11.555336   0
    0.492911    10.993324   0
    0.184992    8.721488    0
    -0.355715   10.325976   0
    -0.397822   8.058397    0
    0.824839    13.730343   0
    1.507278    5.027866    1
    0.099671    6.835839    1
    -0.344008   10.717485   0
    1.785928    7.718645    1
    -0.918801   11.560217   0
    -0.364009   4.747300    1
    -0.841722   4.119083    1
    0.490426    1.960539    1
    -0.007194   9.075792    0
    0.356107    12.447863   0
    0.342578    12.281162   0
    -0.810823   -1.466018   1
    2.530777    6.476801    1
    1.296683    11.607559   0
    0.475487    12.040035   0
    -0.783277   11.009725   0
    0.074798    11.023650   0
    -1.337472   0.468339    1
    -0.102781   13.763651   0
    -0.147324   2.874846    1
    0.518389    9.887035    0
    1.015399    7.571882    0
    -1.658086   -0.027255   1
    1.319944    2.171228    1
    2.056216    5.019981    1
    -0.851633   4.375691    1
    -1.510047   6.061992    0
    -1.076637   -3.181888   1
    1.821096    10.283990   0
    3.010150    8.401766    1
    -1.099458   1.688274    1
    -0.834872   -1.733869   1
    -0.846637   3.849075    1
    1.400102    12.628781   0
    1.752842    5.468166    1
    0.078557    0.059736    1
    0.089392    -0.715300   1
    1.825662    12.693808   0
    0.197445    9.744638    0
    0.126117    0.922311    1
    -0.679797   1.220530    1
    0.677983    2.556666    1
    0.761349    10.693862   0
    -2.168791   0.143632    1
    1.388610    9.341997    0
    0.317029    14.739025   0
    

    The experimental results are shown below.

    Gradient ascent:

    [figure 5: decision boundary fitted by gradient ascent]

    Stochastic gradient ascent:

    [figure 6: decision boundary fitted by stochastic gradient ascent]

    Improved stochastic gradient ascent:

    [figure 7: decision boundary fitted by improved stochastic gradient ascent]

    The results show that improved stochastic gradient ascent separates the data about as well as batch gradient ascent, while plain stochastic gradient ascent (a single pass with a fixed step size) gives a noticeably worse boundary.
