
Random Forest Parameter Tuning and Feature Importance

Author: Jasmine晴天和我 | Published 2018-09-18 17:56

    Feature importance

    import matplotlib.pyplot as plt
    import pandas as pd
    from sklearn.ensemble import RandomForestClassifier

    # Inspect the important features (X and y are the feature matrix and labels prepared beforehand)
    rf = RandomForestClassifier()
    rf.fit(X, y)

    f, ax = plt.subplots(figsize=(7, 5))
    ax.bar(range(len(rf.feature_importances_)), rf.feature_importances_)
    ax.set_title("Feature Importances")
    f.show()

    # Probability of each sample belonging to each class
    probs = rf.predict_proba(X)
    probs_df = pd.DataFrame(probs, columns=['0', '1'])
    probs_df['was_correct'] = rf.predict(X) == y

    f, ax = plt.subplots(figsize=(7, 5))
    probs_df.groupby('0').was_correct.mean().plot(kind='bar', ax=ax)
    ax.set_title("Accuracy at 0 class probability")
    ax.set_ylabel("% Correct")
    ax.set_xlabel("% trees for 0")
    f.show()

    Feature importance: selecting the top features

    import numpy as np
    from sklearn.ensemble import RandomForestClassifier

    forest = RandomForestClassifier(n_estimators=10, n_jobs=-1, random_state=9)
    forest.fit(x_train, y_train)

    importances = forest.feature_importances_
    print('Importance factor of each feature dimension:\n', importances)

    indices = np.argsort(importances)[::-1]  # a[::-1] reverses a, giving indices from most to least important
    print('Feature indices sorted by importance factor:\n', indices)

    most_import = indices[:3]  # take the 3 most important features
    print(x_train[:, most_import])
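
    As a follow-up, a minimal sketch of what the selected columns can be used for: retraining on only the top 3 features and comparing accuracy. This assumes x_test and y_test splits exist alongside x_train and y_train; the names are illustrative.

    # Hypothetical follow-up: assumes x_test / y_test exist next to x_train / y_train
    forest_top3 = RandomForestClassifier(n_estimators=10, n_jobs=-1, random_state=9)
    forest_top3.fit(x_train[:, most_import], y_train)  # train on the 3 selected columns only
    print('Accuracy with all features :', forest.score(x_test, y_test))
    print('Accuracy with top 3 features:', forest_top3.score(x_test[:, most_import], y_test))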

    Feature importance: with feature names

    from sklearn.datasets import load_boston
    from sklearn.ensemble import RandomForestRegressor
    import numpy as np

    # Load the Boston housing dataset as an example
    boston = load_boston()
    X = boston["data"]
    print(type(X), X.shape)
    Y = boston["target"]
    names = boston["feature_names"]
    print(names)

    rf = RandomForestRegressor()
    rf.fit(X, Y)
    print("Features sorted by their score:")
    print(sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), names), reverse=True))
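
    Note that load_boston was removed in scikit-learn 1.2. A minimal sketch of the same workflow on load_diabetes, which still ships with scikit-learn (the choice of dataset is only illustrative):

    # Same idea on a dataset that is still bundled with newer scikit-learn versions
    from sklearn.datasets import load_diabetes
    from sklearn.ensemble import RandomForestRegressor

    diabetes = load_diabetes()
    X, Y, names = diabetes["data"], diabetes["target"], diabetes["feature_names"]
    rf = RandomForestRegressor(random_state=0)
    rf.fit(X, Y)
    print("Features sorted by their score:")
    print(sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), names), reverse=True))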

    Parameter tuning

    from sklearn.model_selection import GridSearchCV

    # Grid search over the 'n_estimators' parameter
    param_test1 = {'n_estimators': list(range(10, 71, 10))}
    gsearch1 = GridSearchCV(estimator=RandomForestClassifier(min_samples_split=100, min_samples_leaf=20,
                                                             max_depth=8, max_features='sqrt', random_state=10),
                            param_grid=param_test1, scoring='roc_auc', cv=5)
    gsearch1.fit(X, y)
    gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_  # show the search results and the best parameters/score

    # The output is as follows:
    ([mean: 0.80681, std: 0.02236, params: {'n_estimators': 10},
      mean: 0.81600, std: 0.03275, params: {'n_estimators': 20},
      mean: 0.81818, std: 0.03136, params: {'n_estimators': 30},
      mean: 0.81838, std: 0.03118, params: {'n_estimators': 40},
      mean: 0.82034, std: 0.03001, params: {'n_estimators': 50},
      mean: 0.82113, std: 0.02966, params: {'n_estimators': 60},
      mean: 0.81992, std: 0.02836, params: {'n_estimators': 70}],
     {'n_estimators': 60}, 0.8211334476626017)
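
    The grid_scores_ attribute above comes from older scikit-learn releases; it was removed in 0.20 in favour of cv_results_. A minimal sketch of reading the same summary from the newer API:

    # Equivalent summary with the cv_results_ API (newer scikit-learn)
    results = gsearch1.cv_results_
    for mean, std, params in zip(results['mean_test_score'],
                                 results['std_test_score'],
                                 results['params']):
        print('mean: %.5f, std: %.5f, params: %s' % (mean, std, params))
    print(gsearch1.best_params_, gsearch1.best_score_)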

    # Grid search over several parameters at once, as shown below
    param_test2 = {'max_depth': list(range(3, 14, 2)), 'min_samples_split': list(range(50, 201, 20))}
    gsearch2 = GridSearchCV(estimator=RandomForestClassifier(n_estimators=60, min_samples_leaf=20,
                                                             max_features='sqrt', oob_score=True, random_state=10),
                            param_grid=param_test2, scoring='roc_auc', iid=False, cv=5)  # the iid argument only exists in older scikit-learn versions
    gsearch2.fit(X, y)
    gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_

    # Check the out-of-bag (OOB) score to see how accuracy changes before and after tuning
    rf1 = RandomForestClassifier(n_estimators=60, max_depth=13, min_samples_split=110, min_samples_leaf=20,
                                 max_features='sqrt', oob_score=True, random_state=10)
    rf1.fit(X, y)
    print(rf1.oob_score_)

    # Grid-search 1-3 parameters at a time and repeat until every parameter has been covered; this yields the final tuned settings.
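
    Continuing that procedure, a sketch of one more round: the values found so far are fixed and min_samples_leaf is searched together with max_features. The search ranges are only illustrative and must fit the number of features in X.

    # Next round (illustrative): fix n_estimators / max_depth / min_samples_split from the previous searches
    param_test3 = {'min_samples_leaf': list(range(10, 60, 10)),
                   'max_features': list(range(3, 11, 2))}
    gsearch3 = GridSearchCV(estimator=RandomForestClassifier(n_estimators=60, max_depth=13, min_samples_split=110,
                                                             oob_score=True, random_state=10),
                            param_grid=param_test3, scoring='roc_auc', cv=5)
    gsearch3.fit(X, y)
    print(gsearch3.best_params_, gsearch3.best_score_)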
