
Financial Risk Control: Loan Default Prediction Challenge - Task 2

Author: 怕热的波波 | Published 2020-09-18 23:42

    1. Import data (omitted)
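    Although the loading step is omitted in the original, here is a minimal sketch for reference (the file names train.csv and testA.csv are assumptions; adjust the paths to your local copies):

    import pandas as pd
    import matplotlib.pyplot as plt
    import seaborn as sns
    import datetime

    # Assumed file names for the competition data
    train_data = pd.read_csv('train.csv')
    test_data = pd.read_csv('testA.csv')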

    2. Inspect the data

    train_data.columns
    test_data.columns
    # isDefault is the target (Y) variable; test_data has two extra columns, n2.2 and n2.3 (checked in the sketch below)
    
    train_data.info()
    
    train_data.describe()
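    To verify the column differences noted above, a set difference over the two column indexes is enough (a small sketch):

    # Expected, per the note above: {'n2.2', 'n2.3'} and {'isDefault'}
    print(set(test_data.columns) - set(train_data.columns))
    print(set(train_data.columns) - set(test_data.columns))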
    
    2.1 Two ways to inspect missing values
    missing = train_data.isnull().sum()/len(train_data)
    missing = missing[missing > 0]
    missing.sort_values(inplace = True)
    missing.plot.bar()
    
    (Figure: bar chart of missing-value ratios)
    def missing_data(data):
        total = data.isnull().sum().sort_values(ascending=False)
        percent = (data.isnull().sum()/data.isnull().count()*100).sort_values(ascending=False)
        return pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
    missing_data(train_data).head(20)
    
    (Table: missing-value counts and percentages)
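    Either view can be turned into a quick filter for heavily missing columns (a small sketch; the 5% cutoff is an arbitrary choice for illustration):

    miss_tbl = missing_data(train_data)
    # Columns whose missing percentage exceeds the cutoff
    print(miss_tbl[miss_tbl['Percent'] > 5])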
    2.2 Features with only one unique value
    ① nunique() returns the number of unique values

    ② unique() returns an array of the unique values

    one_value_fea = [col for col in train_data.columns if train_data[col].nunique()<=1]
    one_value_fea_test = [col for col in test_data.columns if test_data[col].nunique()<=1]
    
    print(one_value_fea)
    print(one_value_fea_test)
    #['policyCode']
    
    print(f'There are {len(one_value_fea)} columns in train dataset with one unique value.')
    print(f'There are {len(one_value_fea_test)} columns in test dataset with one unique value.')
    #There are 1 columns in test dataset with one unique value.
    
    2.3 Classify features by data type

    Features are split into categorical and numerical; the numerical ones are further divided into continuous and discrete.

    numerical_fea = list(train_data.select_dtypes(exclude=['object']).columns)
    category_fea = list(filter(lambda x: x not in numerical_fea, list(train_data.columns)))
    
    Split the numerical variables into continuous and discrete ones.
    #Separate continuous from discrete (category-like) numerical features
    def get_numerical_serial_fea(data,feas):
        numerical_serial_fea = []
        numerical_noserial_fea = []
        for fea in feas:
            temp = data[fea].nunique()
            if temp <= 20:
                numerical_noserial_fea.append(fea)
                continue
            numerical_serial_fea.append(fea)
        return numerical_serial_fea,numerical_noserial_fea
    numerical_serial_fea,numerical_noserial_fea = get_numerical_serial_fea(train_data,numerical_fea)
    
    #This can be combined with the single-factor analysis later on
    #The threshold for treating a numerical feature as discrete can be 10, 20 or 30, depending on need; see the parameterized sketch below
    numerical_noserial_fea
    #['term', 'homeOwnership', 'verificationStatus', 'isDefault', 'purpose', 'pubRecBankruptcies', 'initialListStatus', 'applicationType', 'policyCode', 'n11', 'n12']
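    Since the threshold is a tunable choice, one variation is to expose it as a parameter (a sketch, not part of the original code):

    def get_numerical_serial_fea(data, feas, threshold=20):
        # Continuous: more than `threshold` distinct values; otherwise treated as discrete
        numerical_serial_fea = []
        numerical_noserial_fea = []
        for fea in feas:
            if data[fea].nunique() <= threshold:
                numerical_noserial_fea.append(fea)
            else:
                numerical_serial_fea.append(fea)
        return numerical_serial_fea, numerical_noserial_fea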
    
    2.4 Distribution of continuous variables

    The original code takes a long time to run and does not drop the id column, so an alternative was also tried. It is rougher, but it still meets the need of inspecting each variable's distribution.

    f = pd.melt(train_data, value_vars=numerical_serial_fea)
    g = sns.FacetGrid(f, col="variable",  col_wrap=2, sharex=False, sharey=False)
    g = g.map(sns.distplot, "value")
    
    train_data[numerical_serial_fea].drop('id',axis=1).hist(bins=30, figsize=(15,20))
    plt.show()
    
    (Figure: histograms of each continuous variable)
    2.5 Class balance
    #1 means default, 0 means normal repayment
    temp = train_data['isDefault'].value_counts()
    df = pd.DataFrame({'labels':temp.index, 
                       'values':temp.values})
    plt.figure(figsize = (6,6))
    plt.title('Distribution of isDefault')
    sns.set_color_codes(palette='bright')
    sns.barplot(x = 'labels', y = 'values', data=df)
    plt.show()
    
    #Overall default rate
    train_data['isDefault'].mean()
    #0.1995125
    

    The overall default rate is 19.95%, so the sample is imbalanced.
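    Because of the imbalance, any later train/validation split should preserve the class ratio; a stratified split is one common way to do that (a sketch assuming scikit-learn is available, not part of the original post):

    from sklearn.model_selection import train_test_split

    X = train_data.drop(columns=['isDefault'])
    y = train_data['isDefault']
    # stratify=y keeps the ~19.95% default rate in both splits
    X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
    print(y_tr.mean(), y_val.mean())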

    2.6 Single-factor default rates
    def plot_stats(feature,label_rotation=False,horizontal_layout=True):
        temp = train_data[feature].value_counts()
        df1 = pd.DataFrame({feature: temp.index,'Number of contracts': temp.values})
    
        # isDefault is 0/1, so the group mean gives the default rate
        cat_perc = train_data[[feature, 'isDefault']].groupby([feature],as_index=False).mean()
        cat_perc.sort_values(by='isDefault', ascending=False, inplace=True)
        
        #ncols=2: two plots side by side; nrows=2: two plots stacked vertically
        if(horizontal_layout):
            fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12,6))
        else:
            fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(12,14))
        sns.set_color_codes("pastel")
        s = sns.barplot(ax=ax1, x = feature, y="Number of contracts",data=df1)
        if(label_rotation):
            s.set_xticklabels(s.get_xticklabels(),rotation=90)
        
        s = sns.barplot(ax=ax2, x = feature, y='isDefault', order=cat_perc[feature], data=cat_perc)
        if(label_rotation):
            s.set_xticklabels(s.get_xticklabels(),rotation=90)
        #Add a reference line at the overall default rate
        plt.axhline(0.1995125, color = 'red', linewidth = 2, linestyle='--')
        plt.ylabel('Default rate (mean of isDefault)', fontsize=10)
        plt.tick_params(axis='both', which='major', labelsize=10)
    
        plt.show()
    
    plot_stats('grade')
    plot_stats('term')
    plot_stats('homeOwnership')
    plot_stats('employmentLength',1,0)
    
    (Figures: contract counts and default rates for 'grade', 'term', 'homeOwnership' and 'employmentLength')
    In addition, the discrete numerical features n11 and n12 identified in 2.3 show good discriminative power.
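    A quick way to inspect them is to reuse the plot_stats helper defined above (these two calls are an illustration, not part of the original post):

    plot_stats('n11')
    plot_stats('n12')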

    2.7 Distribution differences of numerical variables across y values

    def plot_distribution_comp(var,nrow=2):
        
        i = 0
        t1 = train_data.loc[train_data['isDefault'] != 0]
        t0 = train_data.loc[train_data['isDefault'] == 0]
    
        sns.set_style('whitegrid')
        fig, ax = plt.subplots(nrow,2,figsize=(12,6*nrow))
    
        for feature in var:
            i += 1
            plt.subplot(nrow,2,i)
            sns.kdeplot(t1[feature].dropna(), bw=0.1, label="isDefault = 1")
            sns.kdeplot(t0[feature].dropna(), bw=0.1, label="isDefault = 0")
            plt.ylabel('Density plot', fontsize=12)
            plt.xlabel(feature, fontsize=12)
            locs, labels = plt.xticks()
            plt.tick_params(axis='both', which='major', labelsize=12)
        plt.show()
    
    var = ['purpose', 'homeOwnership', 'dti', 'revolUtil', 'delinquency_2years', 'regionCode']
    plot_distribution_comp(var, nrow=3)
    
    (Figure: KDE plots of the selected features, default vs. non-default)

    For the selected features, the distributions for default vs. non-default show no obvious differences; further feature engineering will be needed to separate them.
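    To put a number on the visual impression, a two-sample Kolmogorov-Smirnov statistic per feature is one option (a sketch assuming scipy is available; not part of the original post):

    from scipy.stats import ks_2samp

    for feature in var:
        d1 = train_data.loc[train_data['isDefault'] == 1, feature].dropna()
        d0 = train_data.loc[train_data['isDefault'] == 0, feature].dropna()
        stat, p = ks_2samp(d1, d0)
        # Larger KS statistics indicate bigger distribution differences between the two classes
        print(f'{feature}: KS={stat:.4f}, p={p:.3g}')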

    2.8 Distribution differences of categorical variables across y values
    train_data1 = train_data.loc[train_data['isDefault'] != 0]
    train_data0 = train_data.loc[train_data['isDefault'] == 0]
    
    fig,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2,figsize=(12,8))
    train_data1.groupby('grade')['grade'].count().plot(kind='barh', ax=ax1, 
                                                       title='Default - grade distribution')
    train_data0.groupby('grade')['grade'].count().plot(kind='barh', ax=ax2, 
                                                       title='Non-default - grade distribution')
    train_data1.groupby('employmentLength')['employmentLength'].count().plot(kind='barh', ax=ax3, 
                                                       title='Default - employmentLength distribution')
    train_data0.groupby('employmentLength')['employmentLength'].count().plot(kind='barh', ax=ax4, 
                                                       title='Non-default - employmentLength distribution')
    
    (Figure: grade and employmentLength distributions, default vs. non-default)

    grade shows a fairly clear difference, consistent with the single-factor default rate results.
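    The same pattern can be read off a normalized crosstab (a small sketch):

    # Share of isDefault = 0 / 1 within each grade
    pd.crosstab(train_data['grade'], train_data['isDefault'], normalize='index')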

    2.9 Check whether train_data and test_data cover the same time period
    #Convert to datetime; issueDateDT is the number of days between issueDate and the earliest date in the dataset (2007-06-01)
    train_data['issueDate'] = pd.to_datetime(train_data['issueDate'],format='%Y-%m-%d')
    startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
    train_data['issueDateDT'] = train_data['issueDate'].apply(lambda x: x-startdate).dt.days
    #Convert to datetime
    test_data['issueDate'] = pd.to_datetime(test_data['issueDate'],format='%Y-%m-%d')
    startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
    test_data['issueDateDT'] = test_data['issueDate'].apply(lambda x: x-startdate).dt.days
    plt.hist(train_data['issueDateDT'], label='train')
    plt.hist(test_data['issueDateDT'], label='test')
    plt.legend()
    plt.title('Distribution of issueDateDT dates')
    
    (Figure: issueDateDT histograms for train and test)

    The time distributions are consistent, so modeling can proceed directly later on.
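    The same check can be made numerically by comparing the date ranges directly (a small sketch):

    # Issue-date ranges of the two datasets should largely overlap
    print(train_data['issueDate'].min(), train_data['issueDate'].max())
    print(test_data['issueDate'].min(), test_data['issueDate'].max())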

    The EDA stage gives an intuitive, high-level view of each dataset. Discussing the relevant feature variables and data patterns with the business side makes the EDA more practically meaningful and simplifies the subsequent data cleaning.
