Ridge Regression
The regularization strength (alpha) is itself a hyperparameter that needs tuning (a grid-search sketch follows the case study below).
Saving and loading models
API: sklearn.externals.joblib (in newer scikit-learn versions, import joblib directly)
Save: joblib.dump
Load: joblib.load
Note: the file format is .pkl, a binary format
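A minimal save/load sketch, assuming a tiny made-up toy dataset and the placeholder file name ridge.pkl (not values from the case study below):

# minimal joblib save/load sketch; toy data and file name are placeholders
import joblib
from sklearn.linear_model import Ridge

model = Ridge(alpha=1.0)
model.fit([[0.0], [1.0], [2.0]], [0.0, 1.0, 2.0])  # fit on tiny toy data

joblib.dump(model, "./ridge.pkl")      # serialize the fitted estimator to a binary .pkl file
restored = joblib.load("./ridge.pkl")  # deserialize; restored behaves like the original estimator
print(restored.predict([[3.0]]))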
"""案例:波士顿房价"""
from sklearn.datasets import load_boston
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
def myridge():
"""
岭回归直接预测房子价格
:return: None
"""
#获取数据
lb = load_boston()
#分割数据到训练集和测试集
x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)
print(y_train,y_test) #查看一下数据格式
std_x = StandardScaler()
x_train = std_x.fit_transform(x_train)
x_test = std_x.transform(x_test)
# print(x_test.shape)
std_y = StandardScaler()
y_train = std_y.fit_transform(y_train.reshape(-1,1))
y_test = std_y.transform(y_test.reshape(-1,1))
#注意:转换器里面的数据必须要求是二维的,所以我们reshape了数据。x_train,x_test本身就是二维的
#岭回归进行房价预测
rd = Ridge(alpha=1.0) #alpha:正则化力度,默认1.0。这个超参数也可以利用网格搜索找到合适的值,可以是0-1之间的小数值,也可以是1-10之间的整数
rd.fit(x_train, y_train)
print("回归系数:\n", rd.coef_)
y_predict = rd.predict(x_test)
print("预测的房价为:\n", std_y.inverse_transform(y_predict))
print("均方差:\n", mean_squared_error(std_y.inverse_transform(y_test), std_y.inverse_transform(y_predict)))
return None
def loadmodel():
lb = load_boston()
#分割数据到训练集和测试集
x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)
print(y_train,y_test) #查看一下数据格式
std_x = StandardScaler()
x_train = std_x.fit_transform(x_train)
x_test = std_x.transform(x_test)
# print(x_test.shape)
std_y = StandardScaler()
y_train = std_y.fit_transform(y_train.reshape(-1,1))
y_test = std_y.transform(y_test.reshape(-1,1))
rd = joblib.load("./ridge.pkl")
y_predict = rd.predict(x_test)
print("预测的房价为:\n", std_y.inverse_transform(y_predict))
print("均方差:\n", mean_squared_error(std_y.inverse_transform(y_test), std_y.inverse_transform(y_predict)))
if __name__ == "__main__":
myridge()
# loadmodel()
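As noted above, alpha can be tuned with grid search. A minimal sketch using GridSearchCV; the alpha grid, cv=5, and the scoring metric are assumptions for illustration, not values from the original:

# sketch: tuning alpha with GridSearchCV (grid values, cv=5, and scoring are assumed)
from sklearn.datasets import load_boston   # requires scikit-learn < 1.2
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV

lb = load_boston()
param_grid = {"alpha": [0.001, 0.01, 0.1, 1, 2, 5, 10]}
gs = GridSearchCV(Ridge(), param_grid, cv=5, scoring="neg_mean_squared_error")
gs.fit(lb.data, lb.target)
print("Best alpha:", gs.best_params_["alpha"])
print("Best CV score (negative MSE):", gs.best_score_)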
The coefficients obtained by ridge regression are closer to reality and more reliable. In addition, ridge regression reduces the variance of the parameter estimates, making them more stable, which is of considerable practical value in studies where ill-conditioned data are common.
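A small sketch of this stability effect, using made-up toy data with two nearly collinear features (the data and alpha value are illustrative assumptions): ordinary least squares produces very large, unstable coefficients, while ridge shrinks them to small, stable values.

# sketch: ridge vs. ordinary least squares on nearly collinear (ill-conditioned) toy data
import numpy as np
from sklearn.linear_model import LinearRegression, Ridge

rng = np.random.RandomState(0)
x1 = rng.rand(50)
X = np.column_stack([x1, x1 + 1e-6 * rng.rand(50)])  # two almost identical columns
y = 3 * x1 + 0.01 * rng.randn(50)

print("OLS coefficients:  ", LinearRegression().fit(X, y).coef_)  # very large, unstable values
print("Ridge coefficients:", Ridge(alpha=1.0).fit(X, y).coef_)    # small, stable values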