
Multivariate Linear Regression

Author: Muggle01 | Published 2018-09-02 23:52

    This post implements multivariate linear regression with the linear least-squares normal equation, with batch gradient descent, and with stochastic gradient descent. In the experiments, all three achieve a high R² for univariate regression; for multivariate regression the least-squares fit is the most accurate, while the two gradient-based fits are noticeably worse. Since the MSE loss of linear regression is convex and has no local optima, the gap is more plausibly due to incomplete convergence (learning rate and number of iterations) than to the choice of starting point.
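
    For quick reference, the two update rules that the class below implements can be written out on a tiny synthetic data set (a minimal sketch with illustrative variable names, not part of the original program): the closed-form normal equation and a single batch gradient-descent step on the MSE loss.

    import numpy as np

    # toy data: y = 2*x + 1 plus noise (illustrative only)
    rng = np.random.default_rng(0)
    X = rng.normal(size=(50, 1))
    y = 2 * X[:, 0] + 1 + rng.normal(scale=0.1, size=50)

    X_b = np.hstack([np.ones((len(X), 1)), X])  # prepend a bias column

    # closed form (normal equation): theta = (X_b^T X_b)^-1 X_b^T y
    theta_ls = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)

    # one batch gradient-descent step on J(theta) = mean((X_b.dot(theta) - y)^2)
    theta_gd = np.zeros(X_b.shape[1])
    grad = X_b.T.dot(X_b.dot(theta_gd) - y) * 2 / len(y)
    theta_gd = theta_gd - 0.01 * grad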

    Source code

    import numpy as np
    from sklearn import datasets
    from sklearn.metrics import r2_score
    
    class LinearRegression:
    
        def __init__(self):
            """初始化Linear Regression模型"""
            self.coef_ = None
            self.intercept_ = None
            self._theta = None  
    
        def scaling(self, X_train, predict_X):
    #        """min-max normalization"""
    #        scaling_X = np.empty(shape=predict_X.shape)
    #        for i in range(X_train.shape[1]):
    #            scaling_X[:,i] = (predict_X[:,i] - np.min(X_train[:,i]))/(np.max(X_train[:,i])-np.min(X_train[:,i]))

            """Standardization: scale each column of predict_X with the mean and std of the corresponding X_train column"""
            scaling_X = np.empty(shape=predict_X.shape) # shaped like predict_X so a test set of any length can be scaled
            mean_X = np.empty(shape=X_train.shape[1])
            std_X = np.empty(shape=X_train.shape[1])
            for i in range(X_train.shape[1]):
                scaling_X[:,i] = (predict_X[:,i] - np.mean(X_train[:,i])) / np.std(X_train[:,i])
                mean_X[i] = np.mean(X_train[:,i])
                std_X[i] = np.std(X_train[:,i])
            return scaling_X, mean_X, std_X
    
        def fit_normal(self, X_train, y_train):
            """根据最小二乘法训练Linear Regression模型"""
            assert X_train.shape[0] == y_train.shape[0], \
                "the size of X_train must be equal to the size of y_train"
            X_b = np.hstack([np.ones((len(X_train), 1)), X_train]) # prepend a column of ones for the intercept
            self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train) # normal equation: theta = (X^T X)^-1 X^T y

            self.intercept_ = self._theta[0].copy() # intercept
            self.coef_ = self._theta[1:].copy() # coefficients
    
            return self
    
        def fit_gd(self, X_train, y_train, eta=0.01, n_iters=1e4):
            """根据训练数据集X_train, y_train, 使用梯度下降法训练Linear Regression模型"""
            assert X_train.shape[0] == y_train.shape[0], \
                "the size of X_train must be equal to the size of y_train"
    
            def J(theta, X_b, y):
                try:
                    return np.sum((y.reshape(-1) - X_b.dot(theta)) ** 2) / len(y) # flatten y so the subtraction stays 1-D
                except:
                    return float('inf')
    
            def dJ(theta, X_b, y):
    #             res = np.empty(len(theta))
    #             res[0] = np.sum(X_b.dot(theta) - y)
    #             for i in range(1, len(theta)):
    #                 res[i] = (X_b.dot(theta) - y.reshape(-1)).dot(X_b[:, i])
    #             return res * 2 / len(X_b)
                return (X_b.T).dot(X_b.dot(theta) - y.reshape(-1)) * 2 / len(y) # X_b.dot(theta) is a 1-D vector, so y is flattened before subtracting
    
            def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
    
                theta = initial_theta.copy() # initial parameter vector
                cur_iter = 0 # iteration counter
    
                while cur_iter < n_iters:
                    gradient = dJ(theta, X_b, y)
                    last_theta = theta.copy()
                    theta = theta - eta * gradient
                    if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
                        break
    
                    cur_iter += 1
    
                return theta
            
            X_train, mean_X_train, std_X_train= self.scaling(X_train, X_train)
            X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
            initial_theta = np.zeros(X_b.shape[1])
            self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
    
            # undo the standardization so intercept_ and coef_ refer to the original feature scale
            self.intercept_ = self._theta[0] - np.sum(self._theta[1:] * mean_X_train / std_X_train)
            self.coef_ = self._theta[1:] / std_X_train
    
            return self
    
        def fit_sgd(self, X_train, y_train, n_iters=5, t0=5, t1=50):
            """根据训练数据集X_train, y_train, 使用随机梯度下降法训练Linear Regression模型"""
            assert X_train.shape[0] == y_train.shape[0], \
                "the size of X_train must be equal to the size of y_train"
            assert n_iters >= 1
    
            def dJ_sgd(theta, X_b_i, y_i):
                return X_b_i * (X_b_i.dot(theta) - y_i) * 2.
    
            def sgd(X_b, y, initial_theta, n_iters, t0=5, t1=50):
    
                def learning_rate(t):
                    return t0 / (t + t1)
    
                theta = initial_theta.copy()
                m = len(X_b)
    
                for cur_iter in range(n_iters):
                    indexes = np.random.permutation(m)
                    X_b_new = X_b[indexes].copy()
                    y_new = y[indexes].copy()
                    for i in range(m):
                        gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
                        theta = theta - learning_rate(cur_iter * m + i) * gradient
    
                return theta
    
            X_train, mean_X_train, std_X_train= self.scaling(X_train, X_train)
            X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
            initial_theta = np.random.randn(X_b.shape[1])
            self._theta = sgd(X_b, y_train, initial_theta, n_iters, t0, t1)
    
            # undo the standardization so intercept_ and coef_ refer to the original feature scale
            self.intercept_ = self._theta[0] - np.sum(self._theta[1:] * mean_X_train / std_X_train)
            self.coef_ = self._theta[1:] / std_X_train
    
            return self
        
        def predict(self, X_predict):
            """给定待预测数据集X_predict,返回表示X_predict的结果向量"""
            assert self.intercept_ is not None and self.coef_ is not None, \
                "must fit before predict!"
            assert X_predict.shape[1] == len(self.coef_), \
                "the feature number of X_predict must be equal to X_train"
           
            X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
            return X_b.dot(self._theta)
    
        def score_normal(self, X_test, y_test):
            """根据测试数据集 X_test 和 y_test 确定最小二乘模型的准确度"""
    
            y_predict = self.predict(X_test)
            return r2_score(y_test, y_predict)
        
        def score_gd(self, X_train, X_test, y_test):
            """根据测试数据集 X_test 和 y_test 确定梯度下降、随机梯度下降模型的准确度"""
    
            X_test, mean_X_train, std_X_train= self.scaling(X_train, X_test)
            y_predict = self.predict(X_test)
            return r2_score(y_test, y_predict)
    
        def __repr__(self):
            return "LinearRegression()"
    
    #boston = datasets.load_boston()
    ##print (boston.DESCR)
    #X_train = boston.data
    #y_train = boston.target
    #X_train = X_train[y_train<50]
    #y_train = y_train[y_train<50]
    
    X_train = np.random.normal(3,4,size = (100,3))
    y_train = X_train.dot(np.array([[1], [2], [3]])) + 3 + np.random.normal(0,1,size = (100,1))
    
    Linearmodel = LinearRegression()
    
    Linearmodel.fit_normal(X_train, y_train)
    print(Linearmodel.score_normal(X_train, y_train))
    print(Linearmodel.intercept_, Linearmodel.coef_, '\n')
    
    Linearmodel.fit_gd(X_train, y_train, eta=1e-3, n_iters=1e5)
    print(Linearmodel.score_gd(X_train, X_train, y_train))
    print(Linearmodel.intercept_, Linearmodel.coef_, '\n')
    
    Linearmodel.fit_sgd(X_train, y_train, n_iters=500, t0=5, t1=50)
    print(Linearmodel.score_gd(X_train, X_train, y_train))
    print(Linearmodel.intercept_, Linearmodel.coef_, '\n')
    
    

    Python tips

    In the expression X_b.dot(theta) - y.reshape(-1), X_b.dot(theta) evaluates to a 1-D vector of length 100, while y is a 100×1 array: it has only one column but is still two-dimensional. Subtracting the two directly does not fail; NumPy broadcasts them into a 100×100 matrix and silently produces the wrong result, which is why y is flattened with reshape(-1) first.
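
    A minimal sketch of the pitfall (shapes chosen to match the example above):

    import numpy as np

    a = np.zeros(100)            # 1-D vector, shape (100,)
    y = np.zeros((100, 1))       # column vector, shape (100, 1)

    print((a - y).shape)              # (100, 100): silent broadcasting, wrong values
    print((a - y.reshape(-1)).shape)  # (100,): what the gradient computation needs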

    Linear regression fitted with gradient descent or stochastic gradient descent needs the features to be scaled beforehand, whereas the least-squares (normal equation) solution does not: without scaling, features with very different ranges make the loss surface poorly conditioned, so a single fixed learning rate either diverges or converges very slowly.
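
    As a point of comparison, the same standardization can be done with scikit-learn's StandardScaler (shown only as an alternative to the hand-written scaling method; the class above does not use it):

    from sklearn.preprocessing import StandardScaler
    import numpy as np

    X_train = np.random.normal(3, 4, size=(100, 3))
    scaler = StandardScaler().fit(X_train)   # learns the per-column mean and std of X_train
    X_scaled = scaler.transform(X_train)     # (x - mean) / std, same as scaling() above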

    np.hstack([A, B]) stacks the matrices A and B horizontally; np.vstack([A, B]) stacks them vertically; np.concatenate([A, B], axis=...) covers both cases, with the axis argument selecting the direction (axis=1 horizontal, axis=0 vertical).
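
    A small example of the three stacking calls:

    import numpy as np

    A = np.ones((2, 2))
    B = np.zeros((2, 2))

    print(np.hstack([A, B]).shape)               # (2, 4)
    print(np.vstack([A, B]).shape)               # (4, 2)
    print(np.concatenate([A, B], axis=1).shape)  # (2, 4), same as hstack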

    shuffle and permutation both reshuffle an array (randomly permute its elements). The difference is that np.random.shuffle operates on the original array in place, changing its order and returning nothing, while np.random.permutation leaves the original array untouched and returns a new, shuffled copy.
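
    For example:

    import numpy as np

    a = np.arange(5)
    b = np.random.permutation(a)   # returns a new shuffled copy; a is unchanged
    np.random.shuffle(a)           # shuffles a in place and returns None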
