Reading Notes: sklearn


Author: 多一点6013 | Published 2018-09-07 11:11

    This post covers the data-preprocessing part of Chapter 2 in this series of reading notes.

    import pandas as pd
    import numpy as np
    

    Fetching the data

    import os 
    import tarfile
    from six.moves import urllib
    download_root="https://raw.githubusercontent.com/ageron/handson-ml/master/"
    house_path="datasets/housing"
    housing_url=download_root+house_path+"/housing.tgz"
    
    def fetch_housing_data(housing_url=housing_url,house_path=house_path):
        # download housing.tgz and extract housing.csv into house_path
        if not os.path.exists(house_path):
            os.makedirs(house_path)
        tgz_path=os.path.join(house_path,'housing.tgz')
        urllib.request.urlretrieve(housing_url,tgz_path)
        housing_tgz=tarfile.open(tgz_path)
        housing_tgz.extractall(path=house_path)
        housing_tgz.close()
    
    def load_housing_data(house_path=house_path):
        csv_path=os.path.join(house_path,"housing.csv")
        return pd.read_csv(csv_path)
    

    Initial analysis and data exploration

    # fetch_housing_data()  # download the data and extract the csv file
    housing=load_housing_data()
    housing.head()
    
    [image: first five rows of housing from housing.head()]
    housing.info()
    # total_bedrooms has missing values
    # the first 9 columns are float64: longitude, latitude, housing median age, total rooms,
    # total bedrooms, population, households, median income, and median house value
    # the last column, ocean proximity, is an object column
    
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 20640 entries, 0 to 20639
    Data columns (total 10 columns):
    longitude             20640 non-null float64
    latitude              20640 non-null float64
    housing_median_age    20640 non-null float64
    total_rooms           20640 non-null float64
    total_bedrooms        20433 non-null float64
    population            20640 non-null float64
    households            20640 non-null float64
    median_income         20640 non-null float64
    median_house_value    20640 non-null float64
    ocean_proximity       20640 non-null object
    dtypes: float64(9), object(1)
    memory usage: 1.6+ MB
    
    # check which categories ocean_proximity contains
    housing['ocean_proximity'].value_counts()
    
    <1H OCEAN     9136
    INLAND        6551
    NEAR OCEAN    2658
    NEAR BAY      2290
    ISLAND           5
    Name: ocean_proximity, dtype: int64
    
    # basic statistics for the numeric features
    housing.describe()
    
    [image: summary statistics from housing.describe()]
    %matplotlib inline
    import matplotlib.pyplot as plt
    # look at the distribution of each numeric feature
    housing.hist(bins=50,figsize=(20,15))
    # plt.show()
    
    [image: histograms of each numeric feature]

    Geographic distribution

    housing.plot(kind="scatter", x="longitude", y="latitude")
    
    <matplotlib.axes._subplots.AxesSubplot at 0x19bbfcc0>
    
    [image: scatter plot of the districts' longitude and latitude]
    housing.plot(kind="scatter", x="longitude", y="latitude",alpha=0.4)
    # alpha: scalar, optional, default None; blending value between 0 (transparent) and 1 (opaque)
    # this reveals the high-density areas: the darker the spot, the denser the population,
    # though I am not especially familiar with California's geography
    
    <matplotlib.axes._subplots.AxesSubplot at 0x1a705b70>
    
    [image: the same scatter plot with alpha=0.4, highlighting high-density areas]
    housing.plot(kind='scatter',x='longitude',y='latitude',alpha=0.4,
                s=housing['population']/50,label='population',
                c='median_house_value',cmap=plt.get_cmap("jet"),colorbar=True,
                figsize=(9,6))
    # import matplotlib
    # plt.figure(figsize=(15,9)) 
    # sc=plt.scatter(housing['longitude'],housing['latitude'],alpha=0.4,
    #             s=housing['population']/100,label='population',
    #             c=housing['median_house_value'],cmap=plt.get_cmap("jet"))
    # plt.legend()
    # matplotlib.rcParams["font.sans-serif"]=["SimHei"]
    # matplotlib.rcParams['axes.unicode_minus'] = False
    # matplotlib.rcParams['font.size'] =15
    # plt.xlabel('经度')
    # plt.ylabel('纬度')
    # color_bar=plt.colorbar(sc)
    # color_bar.set_label('meidan_house_value')
    # plt.show()
    # the above is the full plt version: it sets the axis labels (in Chinese, hence the SimHei font settings) and adds a colorbar
    
    <matplotlib.axes._subplots.AxesSubplot at 0x19ffb390>
    
    [image: scatter plot with point size proportional to population and color mapped to median_house_value]
    #  house prices are clearly tied to location and population density, but how can the
    #  relationship between several variables be described mathematically? with the standard
    #  correlation coefficient; the most commonly used one is Pearson's correlation coefficient
    corr_matrix = housing.corr()
    corr_matrix
    
    [image: pairwise correlation matrix from housing.corr()]

    Correlations between features

    import seaborn as sns
    plt.figure(figsize=(25,20))
    hm=sns.heatmap(corr_matrix,cbar=True,annot=True,square=True,fmt='.2f',annot_kws={'size':9}, cmap="YlGnBu")
    plt.show()
    
    [image: heatmap of the correlation matrix]
    corr_matrix['median_house_value'].sort_values(ascending=False)
    """
    相关系数的范围是 -1 到 1。当接近 1 时,意味强正相关;
    例如,当收入中位数增加时,房价中位数也会增加。
    当相关系数接近 -1 时,意味强负相关;
    纬度和房价中位数有轻微的负相关性(即,越往北,房价越可能降低)。
    最后,相关系数接近 0,意味没有线性相关性。
    """
    
    # pandas' scatter_matrix offers another angle on the correlations between several variables
    from pandas.plotting import  scatter_matrix
    attributes=['median_house_value',"median_income","total_bedrooms","housing_median_age"]
    scatter_matrix(housing[attributes],figsize=(12,9))
    # sns.pairplot(housing[['median_house_value',"median_income",]],height=5)
    # seaborn's pairplot achieves the same result
    housing.plot(kind="scatter",x='median_income',y='median_house_value',alpha=0.2)
    
    <matplotlib.axes._subplots.AxesSubplot at 0x1e3df9e8>
    
    [images: scatter matrix of the four attributes; scatter plot of median_income vs. median_house_value]

    Creating new features

    • Focus on the relationship between the median income and the median house value: the plot above and the correlation coefficient both show a clear positive correlation
    • The upward trend is easy to see, and the points are not very dispersed
    • The price cap noted in the earlier statistics shows up as a horizontal line at $500,000
    • The histograms show that housing_median_age and median_house_value have long-tailed distributions, so a log or square-root transform is worth trying (a sketch follows this list)
    • Of course, each project handles this differently, but the general approach is similar.
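    A minimal sketch of the log transform suggested above; np.log1p handles zeros safely, and the choice of columns here is illustrative rather than taken from the book:
    
    skewed_cols = ['total_rooms', 'population']   # a long-tailed pair, chosen for illustration
    housing_log = housing.copy()
    housing_log[skewed_cols] = np.log1p(housing_log[skewed_cols])
    housing_log[skewed_cols].hist(bins=50, figsize=(12, 4))
    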
    housing['rooms_per_household']=housing['total_rooms']/housing['households']
    housing['bedrooms_per_room']= housing['total_bedrooms']/housing['total_rooms']
    housing['population_per_household']=housing['population']/housing['households']
    
    corr_matrix = housing.corr()
    corr_matrix['median_house_value'].sort_values(ascending=False)
    # """
    # 新的特征房间中,卧室占比与房屋价值中位数有着更明显的负相关性,比例越低,房价越高;
    # 每家的房间数也比街区的总房间数的更有信息,很明显,房屋越大,房价就越高
    # """
    
    median_house_value          1.000000
    median_income               0.688075
    rooms_per_household         0.151948
    total_rooms                 0.134153
    housing_median_age          0.105623
    households                  0.065843
    total_bedrooms              0.049686
    population_per_household   -0.023737
    population                 -0.024650
    longitude                  -0.045967
    latitude                   -0.144160
    bedrooms_per_room          -0.255880
    Name: median_house_value, dtype: float64
    

    Data cleaning and building a processing pipeline

    • Handling missing values
    • Handling object (text) columns
    • Feature scaling
    • Building a model pipeline
    • All of these steps appeared in earlier posts, so these notes will not explain them again in detail
    # handling the missing values in total_bedrooms
    """
    - drop the samples with missing values: dropna()
    - drop the features with missing values: dropna(axis=1)
    - fill them in (median, mean, 0, interpolation): fillna(housing['total_bedrooms'].median());
      the pandas methods are the most convenient
    """
    from sklearn.preprocessing import Imputer
    imputer=Imputer(strategy='mean')
    housing_num=housing.drop('ocean_proximity',axis=1)
    imputer.fit(housing_num)
    
    Imputer(axis=0, copy=True, missing_values='NaN', strategy='mean', verbose=0)
    
    housing_num_trans=pd.DataFrame(imputer.transform(housing_num),columns=housing_num.columns)
    housing_num_trans.info()
    # missing values filled in; frankly, for missing values alone, pandas' fillna on the raw data saves time: handle it once at the source and never worry about it downstream
    
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 20640 entries, 0 to 20639
    Data columns (total 12 columns):
    longitude                   20640 non-null float64
    latitude                    20640 non-null float64
    housing_median_age          20640 non-null float64
    total_rooms                 20640 non-null float64
    total_bedrooms              20640 non-null float64
    population                  20640 non-null float64
    households                  20640 non-null float64
    median_income               20640 non-null float64
    median_house_value          20640 non-null float64
    rooms_per_household         20640 non-null float64
    bedrooms_per_room           20640 non-null float64
    population_per_household    20640 non-null float64
    dtypes: float64(12)
    memory usage: 1.9 MB
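    A sketch of the pandas alternative mentioned in the comment above. Note also that Imputer was deprecated in scikit-learn 0.20 and removed in 0.22; its replacement is SimpleImputer in sklearn.impute:
    
    # the one-line pandas version: fill the missing values in place on the raw data
    housing['total_bedrooms'].fillna(housing['total_bedrooms'].median(), inplace=True)
    
    # in scikit-learn >= 0.20, use the replacement instead:
    # from sklearn.impute import SimpleImputer
    # imputer = SimpleImputer(strategy='mean')
    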
    
    # handling the object (text) column
    from sklearn.preprocessing import  LabelEncoder
    encoder= LabelEncoder()
    house_cat=housing['ocean_proximity']
    house_cat_encode=encoder.fit_transform(house_cat)
    house_cat_encode
    
    array([3, 3, 3, ..., 1, 1, 1], dtype=int64)
    
    encoder.classes_
    
    array(['<1H OCEAN', 'INLAND', 'ISLAND', 'NEAR BAY', 'NEAR OCEAN'],
          dtype=object)
    
    • As mentioned in an earlier post, this encoding can make two nearby values look more similar than two distant ones
    • For that reason LabelEncoder is generally used only for class labels, not for transforming features
    • The more common approach is one-hot encoding: create one binary attribute per category, e.g. 1 when the category is INLAND and 0 otherwise
    • sklearn provides the OneHotEncoder for this, similar to pd.get_dummies() in pandas
    from sklearn.preprocessing import OneHotEncoder
    # OneHotEncoder only handles numeric input, and only accepts 2D arrays
    encoder=OneHotEncoder()
    housing_cat_1hot=encoder.fit_transform(house_cat_encode.reshape((-1,1)))
    housing_cat_1hot
    
    <20640x5 sparse matrix of type '<class 'numpy.float64'>'
        with 20640 stored elements in Compressed Sparse Row format>
    
    housing_cat_1hot.toarray()
    
    array([[0., 0., 0., 1., 0.],
           [0., 0., 0., 1., 0.],
           [0., 0., 0., 1., 0.],
           ...,
           [0., 1., 0., 0., 0.],
           [0., 1., 0., 0., 0.],
           [0., 1., 0., 0., 0.]])
    
    # LabelBinarizer achieves the same result
    from sklearn.preprocessing import  LabelBinarizer
    encoder=LabelBinarizer()
    housing_cat_1hot=encoder.fit_transform(house_cat)
    housing_cat_1hot
    
    array([[0, 0, 0, 1, 0],
           [0, 0, 0, 1, 0],
           [0, 0, 0, 1, 0],
           ...,
           [0, 1, 0, 0, 0],
           [0, 1, 0, 0, 0],
           [0, 1, 0, 0, 0]])
    
    # applying pandas.get_dummies() directly to the raw data is the simplest approach
    pd.get_dummies(housing[['ocean_proximity']]).head()
    
    [image: first five rows of the one-hot columns from pd.get_dummies]
    • Feature scaling: the two scalers we use most are MinMaxScaler and StandardScaler
    • Features spanning very different ranges are usually rescaled, which helps optimisation algorithms converge faster (especially gradient-descent-based ones)
    • Normalisation: subtract the minimum, then divide by the range (max minus min)
    • Standardisation: subtract the mean, then divide by the standard deviation, giving features with mean 0 and variance 1; it is less affected by outliers. Decision trees and random forests do not need feature scaling
    • Scalers are fit_transform-ed on the training set and only transform-ed on the test set (see the sketch after this list)
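    A minimal sketch of the fit-on-train / transform-on-test convention, using a tiny made-up array (the numbers are purely illustrative):
    
    from sklearn.preprocessing import StandardScaler
    X_train = np.array([[1.0, 200.0], [2.0, 300.0], [3.0, 400.0]])
    X_test = np.array([[1.5, 250.0]])
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)  # statistics learned from the training set only
    X_test_scaled = scaler.transform(X_test)        # the same statistics reused on the test set
    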
    # from the train/test split to the pipeline: a complete preprocessing flow starts here
    from sklearn.model_selection import  train_test_split
    housing=load_housing_data()
    # train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)  #  random sampling
    from sklearn.model_selection import StratifiedShuffleSplit  #  stratified sampling
    
    split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
    housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
    
    for train_index, test_index in split.split(housing, housing["income_cat"]): # stratified sampling keyed on the income category
        strat_train_set = housing.loc[train_index]
        strat_test_set = housing.loc[test_index]
    housing = strat_train_set.copy()  # work on a copy so the training set is not damaged
    
    housing.info()
    
    <class 'pandas.core.frame.DataFrame'>
    Int64Index: 16512 entries, 17606 to 15775
    Data columns (total 11 columns):
    longitude             16512 non-null float64
    latitude              16512 non-null float64
    housing_median_age    16512 non-null float64
    total_rooms           16512 non-null float64
    total_bedrooms        16354 non-null float64
    population            16512 non-null float64
    households            16512 non-null float64
    median_income         16512 non-null float64
    median_house_value    16512 non-null float64
    ocean_proximity       16512 non-null object
    income_cat            16512 non-null float64
    dtypes: float64(10), object(1)
    memory usage: 1.5+ MB
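    As an aside, the same income buckets (ceil of income/1.5, capped at 5) can be written more declaratively with pd.cut; a small sketch that should be equivalent to the two lines above:
    
    # equivalent binning: (0,1.5] -> 1, (1.5,3] -> 2, (3,4.5] -> 3, (4.5,6] -> 4, (6,inf) -> 5
    housing["income_cat"] = pd.cut(housing["median_income"],
                                   bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
                                   labels=[1, 2, 3, 4, 5])
    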
    
    # the transformation pipeline
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    num_pipeline=Pipeline([('imputer',Imputer(strategy='median')),('std_scaler',StandardScaler())])
    housing = strat_train_set.drop("median_house_value", axis=1)
    housing_labels = strat_train_set["median_house_value"].copy()
    housing_num=housing.drop('ocean_proximity',axis=1)
    housing_num_tr = num_pipeline.fit_transform(housing_num)
    housing_cat=housing['ocean_proximity']
    housing_cat_tr= LabelBinarizer().fit_transform(housing_cat)
    housing_train=np.c_[housing_num_tr,housing_cat_tr]
    housing_train.shape
    #  numeric and categorical features cannot be transformed in a single step; they need a FeatureUnion:
    # you give it a list of transformers (possibly all of them); when its transform() method is called,
    # each transformer's transform() runs in parallel, the outputs are collected and concatenated, and the result is returned
    # alternatively, transform the groups separately and merge them with numpy as above; it is essentially the same,
    # except the test set must also be transform-ed and then merged into a prepared test set (a sketch follows the output below)
    
    (16512, 14)
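    A minimal sketch of the FeatureUnion approach described above, assuming the string-capable OneHotEncoder from future_encoders (imported in the next cell); DataFrameSelector is a small helper written here for illustration, not part of sklearn:
    
    from sklearn.base import BaseEstimator, TransformerMixin
    from sklearn.pipeline import FeatureUnion
    
    class DataFrameSelector(BaseEstimator, TransformerMixin):
        # select a fixed list of DataFrame columns and return them as a NumPy array
        def __init__(self, attribute_names):
            self.attribute_names = attribute_names
        def fit(self, X, y=None):
            return self
        def transform(self, X):
            return X[self.attribute_names].values
    
    union_pipeline = FeatureUnion(transformer_list=[
        ('num', Pipeline([('selector', DataFrameSelector(list(housing_num))),
                          ('imputer', Imputer(strategy='median')),
                          ('std_scaler', StandardScaler())])),
        ('cat', Pipeline([('selector', DataFrameSelector(['ocean_proximity'])),
                          ('one_hot', OneHotEncoder(sparse=False))])),
    ])
    # each sub-pipeline runs on its own columns and the outputs are concatenated:
    # housing_prepared_union = union_pipeline.fit_transform(housing)
    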
    
    import os
    import sys
    sys.path.append(os.getcwd())
    from future_encoders import ColumnTransformer
    from future_encoders import OneHotEncoder
    #  both are imported from the standalone future_encoders module; this OneHotEncoder is not the one from sklearn.preprocessing
    
    num_attribs = list(housing_num)
    cat_attribs = ["ocean_proximity"]
    
    full_pipeline = ColumnTransformer([
            ("num", num_pipeline, num_attribs),
            ("cat", OneHotEncoder(), cat_attribs),
        ])
    
    housing_prepared = full_pipeline.fit_transform(housing)
    housing_prepared
    
    array([[-1.15604281,  0.77194962,  0.74333089, ...,  0.        ,
             1.        ,  0.        ],
           [-1.17602483,  0.6596948 , -1.1653172 , ...,  0.        ,
             1.        ,  0.        ],
           [ 1.18684903, -1.34218285,  0.18664186, ...,  0.        ,
             1.        ,  1.        ],
           ...,
           [ 1.58648943, -0.72478134, -1.56295222, ...,  0.        ,
             1.        ,  0.        ],
           [ 0.78221312, -0.85106801,  0.18664186, ...,  0.        ,
             1.        ,  0.        ],
           [-1.43579109,  0.99645926,  1.85670895, ...,  0.        ,
             1.        ,  0.        ]])
    
    # check that the two approaches produce equivalent matrices
    np.allclose(housing_prepared, housing_train)
    
    True
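    Finally, as noted earlier, the fitted pipeline is only transform-ed on the test set, never re-fit; a short sketch:
    
    # apply the already-fitted full_pipeline to the held-out test set
    X_test = strat_test_set.drop("median_house_value", axis=1)
    y_test = strat_test_set["median_house_value"].copy()
    X_test_prepared = full_pipeline.transform(X_test)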
    
