- Import the library and load the Titanic dataset
import pandas  # running in an IPython notebook
titanic = pandas.read_csv("titanic_train.csv")
titanic.head(5)  # show the first five rows
- Inspecting the raw data shows that the Age column has missing values; fill them with the column's median so every column has the same number of values
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
print(titanic.describe())
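Before imputing, it can help to see which columns actually contain missing values; a minimal sketch, assuming the titanic frame loaded above:
# Count missing values per column before filling anything in.
print(titanic.isnull().sum())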
- Get the values of Sex and encode male and female as 0 and 1
print(titanic["Sex"].unique())
# Replace all the occurrences of male with the number 0.
titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1
- Get the values of Embarked, fill the missing ones, and encode S, C, Q as 0, 1, 2
print(titanic["Embarked"].unique())
titanic["Embarked"] = titanic["Embarked"].fillna('S')
titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0
titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1
titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2
- Use cross-validation with a linear regression baseline
from sklearn.linear_model import LinearRegression
# sklearn.cross_validation was removed from modern scikit-learn;
# KFold now lives in sklearn.model_selection.
from sklearn.model_selection import KFold
# The columns we use to predict the target
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# Instantiate the linear regression class
alg = LinearRegression()
# Generate cross-validation folds for the Titanic dataset; each split yields
# the row indices for the training and test portions.
# shuffle=True with a fixed random_state guarantees the same splits on every run.
kf = KFold(n_splits=3, shuffle=True, random_state=1)
predictions = []
for train, test in kf.split(titanic):
    train_predictors = titanic[predictors].iloc[train, :]
    # The target we're training the algorithm on.
    train_target = titanic["Survived"].iloc[train]
    # Train the algorithm using the predictors and target.
    alg.fit(train_predictors, train_target)
    # We can now make predictions on the test fold.
    test_predictions = alg.predict(titanic[predictors].iloc[test, :])
    predictions.append(test_predictions)
- Concatenate the per-fold predictions into one array and score them
import numpy as np
predictions = np.concatenate(predictions, axis=0)
# Map the continuous predictions to outcomes (the only possible outcomes are 1 and 0)
predictions[predictions > .5] = 1
predictions[predictions <= .5] = 0
# Accuracy is the fraction of predictions that match the true labels.
accuracy = (predictions == titanic["Survived"]).mean()
print(accuracy)
Linear regression results
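The manual fold loop above can also be collapsed into a single call with scikit-learn's cross_val_predict; a minimal sketch, assuming the titanic frame and predictors list defined above:
# Collect the out-of-fold predictions in one call instead of a manual loop.
from sklearn.model_selection import KFold, cross_val_predict
kf = KFold(n_splits=3, shuffle=True, random_state=1)
preds = cross_val_predict(LinearRegression(), titanic[predictors], titanic["Survived"], cv=kf)
accuracy = ((preds > .5).astype(int) == titanic["Survived"]).mean()
print(accuracy)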
- Try a different algorithm: logistic regression
# cross_val_score also lives in sklearn.model_selection now.
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
alg = LogisticRegression(random_state=1)
# Compute the accuracy scores for all the cross-validation folds (much simpler than before!)
scores = cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3)
# Take the mean of the scores (because we have one score per fold)
print(scores.mean())
Logistic regression results
- Preprocess the real test set the same way
titanic_test = pandas.read_csv("test.csv")
titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median())
titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median())
titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0
titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1
titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S")
titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0
titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1
titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2
- Use a random forest
from sklearn.model_selection import KFold, cross_val_score
from sklearn.ensemble import RandomForestClassifier
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# Initialize the algorithm with the default parameters
# n_estimators is the number of trees we want to grow
# min_samples_split is the minimum number of rows needed to make a split
# min_samples_leaf is the minimum number of samples allowed at a leaf
# (the bottom points of the tree)
alg = RandomForestClassifier(random_state=1, n_estimators=10, min_samples_split=2, min_samples_leaf=1)
# Compute the accuracy scores for all the cross-validation folds.
kf = KFold(n_splits=3, shuffle=True, random_state=1)
scores = cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)
# Take the mean of the scores (because we have one score per fold)
print(scores.mean())
Random forest results (1)
Tune the model by adjusting its parameters:
alg = RandomForestClassifier(random_state=1, n_estimators=100, min_samples_split=4, min_samples_leaf=2)
# Compute the accuracy scores for all the cross-validation folds.
kf = KFold(n_splits=3, shuffle=True, random_state=1)
scores = cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)
# Take the mean of the scores
print(scores.mean())
Random forest results (2)
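Rather than adjusting the parameters by hand, the same ranges can be scanned systematically with GridSearchCV; a minimal sketch in which the grid values are illustrative assumptions, not part of the original walkthrough:
# Grid-search the forest parameters (the value ranges here are illustrative).
from sklearn.model_selection import GridSearchCV
param_grid = {
    "n_estimators": [10, 50, 100],
    "min_samples_split": [2, 4, 8],
    "min_samples_leaf": [1, 2, 4],
}
search = GridSearchCV(RandomForestClassifier(random_state=1), param_grid, cv=3, scoring="accuracy")
search.fit(titanic[predictors], titanic["Survived"])
print(search.best_params_, search.best_score_)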
- Engineer some features of our own
# Generate a FamilySize column
titanic["FamilySize"] = titanic["SibSp"] + titanic["Parch"]
# The apply method generates a new column
titanic["NameLength"] = titanic["Name"].apply(lambda x: len(x))
import re
# A function to extract the title from a name.
def get_title(name):
    # Search for a title with a regular expression. Titles always consist of
    # capital and lowercase letters and end with a period.
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    # If the title exists, extract and return it.
    if title_search:
        return title_search.group(1)
    return ""
# Get all the titles and print how often each one occurs.
titles = titanic["Name"].apply(get_title)
print(titles.value_counts())
# Map each title to an integer. Some titles are very rare, so they are collapsed into the same code as similar titles.
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5, "Rev": 6, "Major": 7, "Col": 7, "Mlle": 8, "Mme": 8, "Don": 9, "Lady": 10, "Countess": 10, "Jonkheer": 10, "Sir": 9, "Capt": 7, "Ms": 2}
for k, v in title_mapping.items():
    titles[titles == k] = v
# Verify that we converted everything
print(titles.value_counts())
# Add the Title column.
titanic["Title"] = titles
- Plot how strongly each feature relates to the prediction target
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif
import matplotlib.pyplot as plt
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked", "FamilySize", "Title", "NameLength"]
# Perform feature selection
selector = SelectKBest(f_classif, k=5)
selector.fit(titanic[predictors], titanic["Survived"])
# Get the raw p-value for each feature and transform the p-values into scores
scores = -np.log10(selector.pvalues_)
# Plot the scores. Notice how "Pclass", "Sex", "Title", and "Fare" come out best?
plt.bar(range(len(predictors)), scores)
plt.xticks(range(len(predictors)), predictors, rotation='vertical')
plt.show()
# Pick only the four best features.
predictors = ["Pclass", "Sex", "Fare", "Title"]
alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=8, min_samples_leaf=4)
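The walkthrough defines this tuned classifier but stops before scoring it; to evaluate the reduced feature set, the earlier cross-validation call can be reused. A minimal sketch, assuming the four-feature predictors list and alg above:
# Score the four-feature forest with the same 3-fold setup used earlier.
from sklearn.model_selection import KFold, cross_val_score
kf = KFold(n_splits=3, shuffle=True, random_state=1)
scores = cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)
print(scores.mean())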
- Predict with an ensemble: gradient boosting plus logistic regression
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np
# The algorithms we want to ensemble.
# We use logistic regression on a set of linear predictors, and a gradient boosting classifier.
algorithms = [
[GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3), ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title",]],
[LogisticRegression(random_state=1), ["Pclass", "Sex", "Fare", "FamilySize", "Title", "Age", "Embarked"]]
]
# Initialize the cross-validation folds
kf = KFold(n_splits=3, shuffle=True, random_state=1)
predictions = []
for train, test in kf.split(titanic):
    train_target = titanic["Survived"].iloc[train]
    full_test_predictions = []
    # Make predictions with each algorithm on this fold
    for alg, predictors in algorithms:
        # Fit the algorithm on the training data.
        alg.fit(titanic[predictors].iloc[train, :], train_target)
        # Select and predict on the test fold.
        # .astype(float) converts the dataframe to all floats and avoids an sklearn error.
        test_predictions = alg.predict_proba(titanic[predictors].iloc[test, :].astype(float))[:, 1]
        full_test_predictions.append(test_predictions)
    # Use a simple ensembling scheme: average the predictions to get the final classification.
    test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2
    # Any value above .5 becomes a 1 prediction; anything at or below .5 becomes 0.
    test_predictions[test_predictions <= .5] = 0
    test_predictions[test_predictions > .5] = 1
    predictions.append(test_predictions)
# Put all the fold predictions together into one array.
predictions = np.concatenate(predictions, axis=0)
# Compute accuracy as the fraction of predictions that match the training labels.
accuracy = (predictions == titanic["Survived"]).mean()
print(accuracy)
Ensemble prediction results
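To carry the ensemble over to the real test set, each member can be refit on the full training data and the test probabilities averaged; a minimal sketch, assuming the algorithms list, get_title, and title_mapping above. The fillna(0) fallback (for titles that appear only in the test set) is an added assumption:
# Apply the ensemble to the preprocessed test set.
# titanic_test first needs the engineered columns used in `algorithms`.
titanic_test["FamilySize"] = titanic_test["SibSp"] + titanic_test["Parch"]
test_titles = titanic_test["Name"].apply(get_title)
titanic_test["Title"] = test_titles.map(title_mapping).fillna(0).astype(int)
full_predictions = []
for alg, predictors in algorithms:
    # Refit each member on all training rows.
    alg.fit(titanic[predictors].astype(float), titanic["Survived"])
    full_predictions.append(alg.predict_proba(titanic_test[predictors].astype(float))[:, 1])
# Average the two probability vectors and threshold at .5.
predictions = ((full_predictions[0] + full_predictions[1]) / 2 > .5).astype(int)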
Summary
The steps above predicted Titanic survival with linear regression, logistic regression, a random forest, and finally a boosting + logistic-regression ensemble.