    Classification Model Fusion

    from sklearn import datasets
    from sklearn.tree import DecisionTreeClassifier
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.ensemble import VotingClassifier
    from xgboost import XGBClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.svm import SVC
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score,roc_auc_score
    from sklearn.model_selection import cross_val_score
    from sklearn.model_selection import StratifiedKFold
    
    # Voting
    '''
    Hard voting: every model casts one direct vote, with no weighting of the models'
    relative importance; the class receiving the most votes is the final prediction.
    '''
    iris = datasets.load_iris()
    
    x=iris.data
    y=iris.target
    x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)
    
    clf1 = XGBClassifier(learning_rate=0.1, n_estimators=150, max_depth=3, min_child_weight=2, subsample=0.7,
                         colsample_bytree=0.6, objective='multi:softprob')  # iris has 3 classes, so use a multiclass objective
    clf2 = RandomForestClassifier(n_estimators=50, max_depth=1, min_samples_split=4,
                                  min_samples_leaf=63,oob_score=True)
    clf3 = SVC(C=0.1)
    
    # 硬投票
    eclf = VotingClassifier(estimators=[('xgb', clf1), ('rf', clf2), ('svc', clf3)], voting='hard')
    for clf, label in zip([clf1, clf2, clf3, eclf], ['XGBoost', 'Random Forest', 'SVM', 'Ensemble']):
        scores = cross_val_score(clf, x, y, cv=5, scoring='accuracy')
        print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
    '''
    Soft voting: same principle as hard voting, but class probabilities are averaged
    and per-model weights can be assigned, so more important models count for more.
    '''
    x=iris.data
    y=iris.target
    x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)
    
    clf1 = XGBClassifier(learning_rate=0.1, n_estimators=150, max_depth=3, min_child_weight=2, subsample=0.8,
                         colsample_bytree=0.8, objective='multi:softprob')  # multiclass objective, as above
    clf2 = RandomForestClassifier(n_estimators=50, max_depth=1, min_samples_split=4,
                                  min_samples_leaf=63,oob_score=True)
    clf3 = SVC(C=0.1, probability=True)
    
    # 软投票
    eclf = VotingClassifier(estimators=[('xgb', clf1), ('rf', clf2), ('svc', clf3)], voting='soft', weights=[2, 1, 1])
    
    for clf, label in zip([clf1, clf2, clf3, eclf], ['XGBoost', 'Random Forest', 'SVM', 'Ensemble']):
        scores = cross_val_score(clf, x, y, cv=5, scoring='accuracy')
        print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
    
    # Stacking / Blending fusion for classification
    '''
    5-Fold Stacking
    '''
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.ensemble import ExtraTreesClassifier,GradientBoostingClassifier
    import pandas as pd
    # Build the training data: the first 100 iris rows, a binary problem (classes 0 and 1)
    data_0=iris.data
    data=data_0[:100,:]
    target_0=iris.target
    target=target_0[:100]
    # Individual base models used in the fusion
    clfs=[LogisticRegression(solver='lbfgs'),
          RandomForestClassifier(n_estimators=5,n_jobs=-1,criterion='gini'),
          ExtraTreesClassifier(n_estimators=5, n_jobs=-1, criterion='entropy'),
          GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=5)]
    # Hold out part of the data as a test set
    X, X_predict, y, y_predict = train_test_split(data, target, test_size=0.3, random_state=2020)
    
    dataset_blend_train = np.zeros((X.shape[0], len(clfs)))
    dataset_blend_test = np.zeros((X_predict.shape[0], len(clfs)))
    ## 5-fold stacking
    n_splits = 5
    skf = StratifiedKFold(n_splits)
    
    for j, clf in enumerate(clfs):
        # Train each base model in turn; split anew per model so the fold
        # generator is not exhausted after the first one
        dataset_blend_test_j = np.zeros((X_predict.shape[0], n_splits))
        for i, (train, test) in enumerate(skf.split(X, y)):
            # 5-fold cross-training: fold i is held out for prediction, the rest trains
            # the model; the out-of-fold predictions become a new feature for fold i.
            X_train, y_train, X_test, y_test = X[train], y[train], X[test], y[test]
            clf.fit(X_train, y_train)
            y_submission = clf.predict_proba(X_test)[:, 1]
            dataset_blend_train[test, j] = y_submission
            dataset_blend_test_j[:, i] = clf.predict_proba(X_predict)[:, 1]
        # For the test set, average the k fold-models' predictions to form the new feature.
        dataset_blend_test[:, j] = dataset_blend_test_j.mean(1)
        print("val auc Score: %f" % roc_auc_score(y_predict, dataset_blend_test[:, j]))
    
    clf = LogisticRegression(solver='lbfgs')
    clf.fit(dataset_blend_train, y)
    y_submission = clf.predict_proba(dataset_blend_test)[:, 1]
    
    print("Val auc Score of Stacking: %f" % (roc_auc_score(y_predict, y_submission)))
    
    # Stacking for classification with mlxtend
    import warnings
    warnings.filterwarnings('ignore')
    import itertools
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    from sklearn import datasets
    from sklearn.linear_model import LogisticRegression
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.naive_bayes import GaussianNB 
    from sklearn.ensemble import RandomForestClassifier
    from mlxtend.classifier import StackingClassifier
    from sklearn.model_selection import cross_val_score
    from mlxtend.plotting import plot_learning_curves
    from mlxtend.plotting import plot_decision_regions
    
    iris = datasets.load_iris()
    X, y = iris.data[:, 1:3], iris.target
    
    clf1 = KNeighborsClassifier(n_neighbors=1)
    clf2 = RandomForestClassifier(random_state=1)
    clf3 = GaussianNB()
    lr = LogisticRegression()
    sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], 
                              meta_classifier=lr)
    labels = ['KNN', 'Random Forest', 'Naive Bayes', 'Stacking Classifier']
    clf_list=[clf1,clf2,clf3,sclf]
    fig=plt.figure(figsize=(10,8))
    gs=gridspec.GridSpec(2,2)
    grid=itertools.product([0,1],repeat=2)
    
    clf_cv_mean=[]
    clf_cv_std=[]
    for clf, label, grd in zip(clf_list, labels, grid):
            
        scores = cross_val_score(clf, X, y, cv=3, scoring='accuracy')
        print("Accuracy: %.2f (+/- %.2f) [%s]" %(scores.mean(), scores.std(), label))
        clf_cv_mean.append(scores.mean())
        clf_cv_std.append(scores.std())
            
        clf.fit(X, y)
        ax = plt.subplot(gs[grd[0], grd[1]])
        plot_decision_regions(X=X, y=y, clf=clf)
        plt.title(label)
    
    plt.show()
    

    About Blending
    Blending is simpler than stacking, but because it trains the second layer on
    less data, it is more prone to overfitting.
    The main idea: split the original training set into two parts, e.g. 70% as the new
    training set and the remaining 30% as a hold-out set. In the first layer, train
    several models on the 70% and use them to predict labels for the 30% hold-out as
    well as for the test set. In the second layer, train a new model that uses the
    first-layer predictions on the 30% as its features; the first-layer predictions on
    the test set are then fed to this second-layer model for the final prediction, as
    sketched below.
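
    A minimal sketch of this procedure, assuming the binary iris subset (data, target),
    the base models clfs, and the imports from the 5-fold stacking example above are
    still in scope:

    # Blending: a single 70/30 split of the training data instead of k folds.
    X, X_predict, y, y_predict = train_test_split(data, target, test_size=0.3, random_state=2020)
    X_d1, X_d2, y_d1, y_d2 = train_test_split(X, y, test_size=0.3, random_state=2020)
    
    dataset_d1 = np.zeros((X_d2.shape[0], len(clfs)))       # level-1 training features
    dataset_d2 = np.zeros((X_predict.shape[0], len(clfs)))  # level-1 test features
    
    for j, clf in enumerate(clfs):
        # First layer: fit on the 70% part, predict the 30% part and the test set.
        clf.fit(X_d1, y_d1)
        dataset_d1[:, j] = clf.predict_proba(X_d2)[:, 1]
        dataset_d2[:, j] = clf.predict_proba(X_predict)[:, 1]
    
    # Second layer: train on the 30% part's first-layer predictions as new features.
    clf = LogisticRegression(solver='lbfgs')
    clf.fit(dataset_d1, y_d2)
    y_submission = clf.predict_proba(dataset_d2)[:, 1]
    print("Val auc Score of Blending: %f" % roc_auc_score(y_predict, y_submission))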
