智能投标
智能投标这里采用了两种算法,但是总体思路都是先筛选出来可能亏损的标的然后再用一个回归器来预测具体选择这个标的的收益情况。
代码如下:
首先是用到的包的导入,这里我一般都会固定导入pandas、numpy以及画图相关的包,并且在画图时为了让图表显示美观都会做如下设置,所以其实可以直接写入配置文件中,参考(配置你的jupyter)
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
plotly.offline.init_notebook_mode(connected=True)
import plotly.plotly as py
import plotly.graph_objs as go
%matplotlib inline
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn import linear_model, preprocessing
from IPython.display import display
pd.set_option('display.max_columns', 9999)
pd.set_option('display.float_format', lambda x: '%.4f' % x)
sns.set_style("darkgrid",{"font.sans-serif":['simhei', 'Arial']}) # for chinese display
import matplotlib as mpl
mpl.rcParams['axes.unicode_minus'] = False # for minus display
import datetime
import xgboost as xgb
下一步是读取数据并且对于数据进行处理:
1) 时间类型的处理以及两个表的拼接
# Load the loan-listing (LC) table and parse the lending-success date.
df_lc = pd.read_csv('./LC.csv')
df_lc['借款成功日期'] = pd.to_datetime(df_lc['借款成功日期'])
# Load the repayment-schedule (LP) table; parse due/repayment dates.
df_lp = pd.read_csv('./LP.csv')
df_lp['到期日期'] = pd.to_datetime(df_lp['到期日期'])
# A missing repayment date is replaced by a far-future sentinel so the
# installment counts as unpaid.
# NOTE(review): the pattern '\\\\N' matches a literal "\\N" (two backslashes);
# confirm against the raw CSV whether the marker is "\N" or "\\N".
df_lp['还款日期'] = pd.to_datetime(df_lp['还款日期'].str.replace('\\\\N', '2099-12-31'))
df_lp['还款状态'] = df_lp['还款状态'].astype(int)
df_lp = df_lp[df_lp['到期日期'] < '2016-11-01'] # latest recorddate is 2017-02-22, so keeping loans due before 2016-11-01 leaves a ~3-month window to observe overdue status
# Keep only listings present in both tables.
df_lc = df_lc[df_lc.ListingId.isin(df_lp.ListingId.unique())]
# Consistency check: summed expected principal per listing should equal the loan amount.
expected_pp = df_lp.groupby('ListingId')['应还本金'].sum()
# NOTE(review): np.isclose compares element-by-element (positionally), not by
# ListingId — this assumes both series come out in the same order; verify.
ind_consistent = np.isclose(df_lc[['ListingId', '借款金额']].set_index('ListingId')['借款金额'], expected_pp)
# Drop listings where the two tables disagree.
df_lc = df_lc[df_lc.ListingId.isin(expected_pp[ind_consistent].index)]
df_lp = df_lp[df_lp.ListingId.isin(expected_pp[ind_consistent].index)]
# Realized return = expected principal+interest minus the amounts still outstanding.
expected_ret = df_lp.groupby('ListingId')[['应还本金', '应还利息']].sum().sum(axis=1)
pp = df_lc[['ListingId', '借款金额']].set_index('ListingId')['借款金额']
real_ret = expected_ret - df_lp.groupby('ListingId')[['剩余本金', '剩余利息']].sum().sum(axis=1)
# real_ROI ~ 1 + gain rate, since real_ret includes repaid principal.
real_ROI = real_ret / pp
df_lc = pd.merge(df_lc, real_ROI.rename('real_ROI').to_frame(), how='left', left_on='ListingId', right_index=True)
# Annualized ROI: 借款期限 is the loan term in months, hence the *12.
# (The original comment said "per month", which contradicts the formula.)
df_lc['real_ROI_per_year'] = ((df_lc['real_ROI'] - 1) / df_lc['借款期限']) * 12
df_lc.head()
2) 类别变量的编码
# Encode every categorical column into numeric form for the tree model.
# Ordinal-encode the initial credit rating.
X['初始评级'] = preprocessing.LabelEncoder().fit_transform(X['初始评级'])
# One-hot encode the loan type, then drop the raw column.
type_dummies = pd.get_dummies(X['借款类型'], prefix='借款类型')
X = pd.concat([X, type_dummies], axis=1).drop('借款类型', axis=1)
# Two-valued categorical columns: label-encode each one.
for two_level_col in ['是否首标', '性别']:
    X[two_level_col] = preprocessing.LabelEncoder().fit_transform(X[two_level_col])
# Verification flags: 0 when not verified ('未成功认证'), 1 otherwise.
for cert_col in ['手机认证', '户口认证', '视频认证', '学历认证', '征信认证', '淘宝认证']:
    X[cert_col] = np.where(X[cert_col] == '未成功认证', 0, 1)
X.head()
3)特征离散化
def add_ends_to_bins(bins):
    """Pad *bins* with -inf and +inf so a later pd.cut covers every value."""
    return np.concatenate(([-np.inf], np.asarray(bins, dtype=float), [np.inf]))
# Discretize continuous features into quantile bins; results are collected
# in dict_s_fea_dct (example shown: loan amount of the subset df_lc_C —
# presumably the C-rating listings; confirm against earlier cells).
dict_s_fea_dct = {}
s_amount_C = df_lc_C['借款金额']
# Quantile probabilities used as cut points; add_ends_to_bins pads them with
# +/-inf so no value falls outside a bin.
q = [0.05, 0.2, 0.5, 0.8, 0.95]
bins = s_amount_C.quantile(q)
s_fea_dct_amount_C = pd.cut(s_amount_C, add_ends_to_bins(bins.values))
dict_s_fea_dct['amount'] = s_fea_dct_amount_C
可以画图说明:
# Inspect the distribution of annualized ROI via its 1%-99% quantile curve.
x_quantile = np.arange(0.01, 1., 0.01)
# Displayed in the notebook for inspection; the value is not bound.
df_lc['real_ROI_per_year'].quantile(x_quantile)
plt.plot(x_quantile, df_lc['real_ROI_per_year'].quantile(x_quantile));
先训练二分类模型:
# Binary target: True when the listing lost money (annualized ROI < 0).
y = df_lc.real_ROI_per_year < 0
# Hold out 10% for testing; stratify preserves the class ratio in both splits.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42, stratify=y)
# XGBoost hyper-parameters for the loss/no-loss classifier.
xgb_params = {
    'eta': 0.01,                  # small learning rate, offset by many boosting rounds
    'max_depth': 13,
    'subsample': 0.7,
    'colsample_bytree': 0.7,
    # Re-weight the positive (loss) class to counter class imbalance.
    'scale_pos_weight': (y_train == 0).sum() / (y_train == 1).sum(),
    'max_delta_step': 1,          # caps per-leaf weight updates; recommended with imbalanced logistic loss
    'objective': 'binary:logistic',
    'eval_metric': 'error',
    'silent': 1,
    # FIX: removed 'shuffle' — it is not an XGBoost learning parameter and was
    # silently ignored by the library.
}
dtrain = xgb.DMatrix(X_train.values, y_train.values)
dtest = xgb.DMatrix(X_test.values, y_test.values)
# Cross-validate to find the best number of boosting rounds
# (early stopping on the CV error metric).
cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=10000, early_stopping_rounds=20,
                   verbose_eval=50, show_stdv=False)
cv_output[['train-error-mean', 'test-error-mean']].plot();
按交叉验证得到的参数重新训练:
# Retrain on the full training set with the round count selected by CV
# (cv.output has one row per completed boosting round).
num_boost_rounds = len(cv_output)
model = xgb.train(dict(xgb_params, silent=0), dtrain, num_boost_round=num_boost_rounds)
保存模型:
# Persist the trained classifier so later runs can skip the training steps.
model.save_model('xgb_clf_first_step_maxdepth_13_for_default.model')
读取模型(如果之前已保存,则不用做之前的训练步骤):
# Recreate an empty booster, then restore the saved classifier from disk.
model_for_default = xgb.Booster({'nthread':4}) # init empty model
model_for_default.load_model('xgb_clf_first_step_maxdepth_13_for_default.model') # load saved model weights
查看特征重要性:
# Plot and list the feature importances of the loaded classifier.
fig, ax = plt.subplots(1, 1, figsize=(8, 13))
xgb.plot_importance(model_for_default, height=0.5, ax=ax);
# get_fscore() keys are 'f<idx>' feature ids; features never used in a split
# are absent from the dict, and dict order is not guaranteed to be sorted.
fscore = model_for_default.get_fscore()
s_fscore = pd.Series({int(k.lstrip('f')): v for k, v in fscore.items()}).sort_index()
# FIX: map feature ids to column names positionally through the integer index.
# The original `s_fscore.index = X.columns` assumed every feature appears in
# fscore in sorted order — otherwise it raises a length mismatch or, worse,
# silently mislabels features.
s_fscore.index = X.columns[s_fscore.index]
s_fscore.sort_values(ascending=False)
# Predicted loss probabilities on the held-out test set (displayed in the notebook).
model_for_default.predict(dtest)
# Confusion matrix at a 0.02 probability threshold.
# NOTE(review): 0.02 is far below the usual 0.5 — presumably chosen to favor
# recall on the (rare) loss class at the cost of precision; confirm intent.
confusion_matrix(y_test, model_for_default.predict(dtest) > 0.02)
网友评论