KNN算法

作者: longsan0918 | 来源:发表于2018-12-18 10:27 被阅读17次
# -*- coding: utf-8 -*-
# @Time    : 2018/12/14 上午11:22
# @Author  : scl
# @Email   : 1163820757@qq.com
# @File    : KNN.py
# @Software: PyCharm

'''
Iris classification: build a KNN model and use it for prediction.
'''
import numpy as np
import pandas as  pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings

from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier ##KNN分类
from sklearn.preprocessing import StandardScaler # 数据标准化

## Font setup: use SimHei so the Chinese labels in the plots render correctly.
mpl.rcParams['font.sans-serif']=[u'simHei']
## Keep the minus sign displayable once a CJK font is active.
mpl.rcParams['axes.unicode_minus']=False


## 1. Data loading: the iris data file has no header row, so the column
## names are supplied explicitly ('cla' is the class-label column).
path = "datas/iris.data"
names = ['sepal length', 'sepal width', 'petal length', 'petal width', 'cla']
df = pd.read_csv(path, header=None, names=names)
# print(df.T.head())


# 2 数据处理
def parseRecord(record):
    """Convert one raw iris row into numeric values.

    Feature columns are cast to float; the 'cla' label column is encoded as
    1/2/3 for setosa/versicolor/virginica, or NaN for any unknown label so
    the row can be dropped later.
    """
    # Known class labels and their integer codes.
    label_codes = {
        'Iris-setosa': 1,
        'Iris-versicolor': 2,
        'Iris-virginica': 3,
    }
    parsed = []
    for column, value in zip(names, record):
        if column == 'cla':
            parsed.append(label_codes.get(value, np.nan))
        else:
            parsed.append(float(value))
    return parsed

# Parse every raw row into numeric values (axis=1 applies per row; the
# returned pd.Series keeps the original column names), then drop any row
# whose class label could not be recognised (encoded as NaN).
datas = (
    df.apply(lambda row: pd.Series(parseRecord(row), index=names), axis=1)
    .dropna(how='any')
)

# 3. Train/test split.
X = datas[names[0:-1]]  # feature columns (all but the last)
Y = datas[names[-1]]    # numeric class label (1/2/3)
# print(X.shape)
# print(Y.shape)
# random_state pins the shuffle so the 80/20 split is reproducible.
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,train_size=0.8,test_size=0.2,random_state=1)

print('原始数据个数%d 训练数据个数%d 训练数据数据特征个数%d 测试数据条数%d'%(len(X),len(X_train),
                                                 X_train.shape[1],X_test.shape[0]))

# Feature standardization (left disabled in the original run).
# NOTE: if enabled, the scaler must be fit on the training split only and
# then applied to the test split — calling fit_transform on X_test (as the
# original commented code did) would leak test-set statistics:
# ss = StandardScaler()
# X_train = ss.fit_transform(X_train)
# X_test = ss.transform(X_test)

# 5. Build the model; n_neighbors is the number of nearest neighbours (k).
knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(X_train,Y_train)

# 6. Prediction and model evaluation.
knn_predict_y = knn.predict(X_test)
# BUG fix: accuracy was originally computed on the *training* set
# (knn.score(X_train, Y_train)), which overstates model quality; report the
# held-out test accuracy instead (matching the value shown in the plot).
print('KNN算法准确率:', knn.score(X_test, Y_test))

## Plot 2: predicted vs. true classes for every test sample.
x_test_len = range(len(X_test))
plt.figure(figsize=(12, 9), facecolor='w')
plt.ylim(0.5,3.5)  # class codes are 1..3; pad half a unit on each side
# True values: small red dots drawn on top (zorder=3) of the larger yellow
# prediction markers (zorder=1), so mismatches stand out.
plt.plot(x_test_len, Y_test, 'ro',markersize = 6, zorder=3, label=u'真实值')
plt.plot(x_test_len, knn_predict_y, 'yo', markersize = 16, zorder=1,
         label=u'KNN算法预测值,准确率=%.3f' % knn.score(X_test, Y_test))
plt.legend(loc = 'upper right')
plt.xlabel(u'数据编号', fontsize=18)
plt.ylabel(u'种类', fontsize=18)
plt.title(u'鸢尾花数据分类', fontsize=20)
plt.show()

效果图:


Figure_1.png

2 使用交叉验证(GridSearchCV)的方式 选取模型参数

'''
 使用交叉验证的方式 选取模型参数
'''

import pandas as pd
import numpy as np
import pickle

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
# NOTE(review): sklearn.externals.joblib was deprecated in scikit-learn 0.21
# and removed in 0.23 — on modern versions use `import joblib` instead.
from sklearn.externals import joblib


# 1. Load the data (no header row; the last column is the class label).
names = ['A', 'B', 'C', 'D', 'label']
path = './datas/iris.data'
df = pd.read_csv(path, sep=',', header = None, names = names)


# 2. Data cleaning: treat '?' as missing and drop incomplete rows.
df.replace('?', np.nan, inplace = True)
df.dropna(axis=0, how='any', inplace=True)


# 3. Split into features and label, and encode the string labels as float
# indices 0.0/1.0/2.0 in sorted label order.
X = df[names[:-1]]
Y = df[names[-1]]
Y_label_values = np.unique(Y)
Y_label_values.sort()  # ['Iris-setosa' 'Iris-versicolor' 'Iris-virginica']
# BUG fix: the original loop did `Y[Y == Y_label_values[i]] = I`, where `I`
# is undefined (NameError as written; should have been `i`), and mutating a
# sliced Series also raised a SettingWithCopyWarning. Series.map with an
# explicit dict performs the encoding correctly in one pass.
# (np.float was removed in NumPy 1.24, so plain float is used for the codes.)
label_to_index = {label: float(idx) for idx, label in enumerate(Y_label_values)}
Y = Y.map(label_to_index)
# print('---%s',Y)


# 4. Train/test split (reproducible via random_state).
x_train, x_test, y_train, y_test = train_test_split(X, Y, train_size = 0.8, test_size = 0.2,random_state=28)


# 5. Feature engineering: learn standardization statistics from the
# training split only, then apply the identical transform to both splits.
ss = StandardScaler()
ss.fit(x_train, y_train)
x_train = ss.transform(x_train)
x_test = ss.transform(x_test)


# 6. Base model: KNN backed by a KD-tree index.
knn = KNeighborsClassifier(algorithm='kd_tree')


# 7. Use grid-search cross-validation to pick the model hyper-parameters
# (the combination that scores best on the training data).
"""
GridSearchCV signature, for reference:
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise',
                 return_train_score=True):
        estimator: the sklearn object whose parameters are being tuned
                   (feature-engineering step, algorithm model, pipeline, ...)
        param_grid: dict of candidate parameters; each key is one of the
                    estimator's parameter names, each value a list of settings
        cv: number of cross-validation folds
        scoring: metric used to rank parameter combinations; defaults to the
                 estimator's own score method. Options:
                 http://scikit-learn.org/0.18/modules/model_evaluation.html#model-evaluation
"""

# 4 x 2 x 3 = 24 parameter combinations, each evaluated with 5-fold CV.
param_grid = {
    'n_neighbors': [3, 5, 10, 20],
    'weights': ['uniform', 'distance'],
    'leaf_size': [10, 30, 50]
}

algo = GridSearchCV(estimator=knn, param_grid = param_grid, cv = 5)
algo.fit(x_train, y_train)

# 8. Report the best parameter set and the refit best model.
best_param = algo.best_params_
print("最好的参数列表:{}".format(best_param))
best_knn = algo.best_estimator_
print("最优模型:{}".format(best_knn))
# Both calls go through the same refit best estimator; raw samples must be
# passed through the fitted scaler first.
print("使用最优模型来预测:{}".format(best_knn.predict(ss.transform([[4.6, 3.4, 1.4, 0.3]]))))
print("使用GridSearchCV预测:{}".format(algo.predict(ss.transform([[4.6, 3.4, 1.4, 0.3]]))))


# 9. Model evaluation (accuracy, since this is a classifier).
y_hat = algo.predict(x_test)
print("在训练集上的模型效果(分类算法中为准确率):{}".format(algo.score(x_train, y_train)))
print("在测试集上的模型效果(分类算法中为准确率):{}".format(algo.score(x_test, y_test)))
print("预测值:\n{}".format(y_hat))
print("预测的实际类别:\n{}".format([np.array(Y_label_values)[int(i)] for i in y_hat]))


# 9. Model persistence.
import os  # local import keeps this snippet self-contained

# joblib.dump / open() do not create missing directories — ensure it exists.
os.makedirs('./model', exist_ok=True)

# Save the index -> label-name mapping so predictions can be decoded later.
y_index_label_dict = dict(zip(range(len(Y_label_values)), Y_label_values))
# BUG fix: the original opened the pickle file without ever closing it; a
# context manager guarantees the handle is flushed and closed.
with open('./model/y_label_value.pkl', 'wb') as f:
    pickle.dump(obj = y_index_label_dict, file = f)
# Persist the scaler too: inference-time inputs need the identical transform.
joblib.dump(ss, './model/ss.pkl')
# Option 1: persist the whole GridSearchCV object.
joblib.dump(algo, './model/gcv.pkl')
# Option 2: persist only the best estimator found by the search.
joblib.dump(algo.best_estimator_, './model/knn.pkl')

print("Done!!!")

控制台: /anaconda3/envs/mlenvment/bin/python3.7 /Users/long/Desktop/ml_worksapce/MlGitHubCode/MlWorkSpacePrj/回归算法/KNN与决策树/网格交叉验证.py
/anaconda3/envs/mlenvment/lib/python3.7/site-packages/sklearn/externals/joblib/externals/cloudpickle/cloudpickle.py:47: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses
import imp
['Iris-setosa' 'Iris-versicolor' 'Iris-virginica']
/Users/long/Desktop/ml_worksapce/MlGitHubCode/MlWorkSpacePrj/回归算法/KNN与决策树/网格交叉验证.py:43: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
Y[Y == Y_label_values[i]] = I
['Iris-setosa' 'Iris-versicolor' 'Iris-virginica']
['Iris-setosa' 'Iris-versicolor' 'Iris-virginica']
/anaconda3/envs/mlenvment/lib/python3.7/site-packages/sklearn/model_selection/_search.py:841: DeprecationWarning: The default of the iid parameter will change from True to False in version 0.22 and will be removed in 0.24. This will change numeric results when test-set sizes are unequal.
DeprecationWarning)
最好的参数列表:{'leaf_size': 10, 'n_neighbors': 20, 'weights': 'distance'}
最优模型:KNeighborsClassifier(algorithm='kd_tree', leaf_size=10, metric='minkowski',

       metric_params=None, n_jobs=None, n_neighbors=20, p=2,
       weights='distance')

使用最优模型来预测:[0.]
使用GridSearchCV预测:[0.]
在训练集上的模型效果(分类算法中为准确率):1.0
在测试集上的模型效果(分类算法中为准确率):0.9333333333333333
预测值:
[0. 1. 1. 0. 2. 1. 2. 1. 1. 0. 2. 0. 1. 1. 2. 0. 2. 2. 2. 1. 0. 0. 1. 2. 1. 0. 2. 2. 0. 1.]
预测的实际类别:
['Iris-setosa', 'Iris-versicolor', 'Iris-versicolor', 'Iris-setosa', 'Iris-virginica', 'Iris-versicolor', 'Iris-virginica', 'Iris-versicolor', 'Iris-versicolor', 'Iris-setosa', 'Iris-virginica', 'Iris-setosa', 'Iris-versicolor', 'Iris-versicolor', 'Iris-virginica', 'Iris-setosa', 'Iris-virginica', 'Iris-virginica', 'Iris-virginica', 'Iris-versicolor', 'Iris-setosa', 'Iris-setosa', 'Iris-versicolor', 'Iris-virginica', 'Iris-versicolor', 'Iris-setosa', 'Iris-virginica', 'Iris-virginica', 'Iris-setosa', 'Iris-versicolor']
Done!!!

3 使用本地保存的模型 预测数值

# -*- coding: utf-8 -*-
# @Time    : 2018/12/18 上午10:11
# @Author  : scl
# @Email   : 1163820757@qq.com
# @File    : 使用交叉验证模型预测.py
# @Software: PyCharm

'''
 使用本地保存的模型 预测数值
'''
import numpy as np
import pickle
from sklearn.externals import joblib


# 1. Load the persisted artifacts. Example inputs: [5.0, 3.4, 1.5, 0.2], [6.6, 3.0, 4.4, 1.4]
ss = joblib.load('./model/ss.pkl')    # fitted StandardScaler
gcv = joblib.load('./model/gcv.pkl')  # the whole GridSearchCV object
knn = joblib.load('./model/knn.pkl')  # the best estimator only
# BUG fix: the original passed an open() result straight to pickle.load and
# never closed it; a context manager closes the handle deterministically.
with open('./model/y_label_value.pkl', 'rb') as f:
    y_label_index_2_label_dict = pickle.load(f)


print(y_label_index_2_label_dict)

def predict1(x):
    """Predict class indices for raw samples via the persisted GridSearchCV."""
    scaled = ss.transform(x)
    return gcv.predict(scaled)

def predict2(x):
    """Predict class indices for raw samples via the persisted best KNN model."""
    scaled = ss.transform(x)
    return knn.predict(scaled)

if __name__ == '__main__':
    # Predict two hand-written samples with both persisted models.
    x = [[5.0, 3.4, 1.5, 0.2], [6.6, 3.0, 4.4, 1.4]]
    y_predict1 = predict1(x)
    print(y_predict1)
    y_predict2 = predict2(x)
    print(y_predict2)

    # BUG fix: the original had a stray `print(predict2)` here which printed
    # the function object itself ("<function predict2 at 0x...>") — a debug
    # leftover, removed.
    # Decode the numeric predictions back to label names.
    result_label = [y_label_index_2_label_dict[int(index)] for index in y_predict2]
    print("预测结果%s"%(result_label))
    result = list(zip(y_predict2,result_label))
    print(result)

控制台:
/anaconda3/envs/mlenvment/bin/python3.7 /Users/long/Desktop/ml_worksapce/MlGitHubCode/MlWorkSpacePrj/回归算法/KNN与决策树/使用交叉验证模型预测.py
/anaconda3/envs/mlenvment/lib/python3.7/site-packages/sklearn/externals/joblib/externals/cloudpickle/cloudpickle.py:47: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses
import imp
{0: 'Iris-setosa', 1: 'Iris-versicolor', 2: 'Iris-virginica'}
[0. 1.]
[0. 1.]
<function predict2 at 0x10dda71e0>
预测结果['Iris-setosa', 'Iris-versicolor']
[(0.0, 'Iris-setosa'), (1.0, 'Iris-versicolor')]

相关文章

  • KNN与K-Means算法的区别

    内容参考:Kmeans算法与KNN算法的区别kNN与kMeans聚类算法的区别 KNN-近邻算法-分类算法 思想:...

  • knn算法

    knn算法 knn算法简介 邻近算法,或者说K最近邻(kNN,k-NearestNeighbor)分类算法。所谓K...

  • KNN近邻算法总结

    目录 一、KNN近邻算法思想 二、KNN模型三大要素 三、KNN算法实现步骤 四、KNN算法的KD树实现 五、总结...

  • 机器学习笔记汇总

    kNN算法:K最近邻(kNN,k-NearestNeighbor)分类算法

  • 01 KNN算法 - 概述

    KNN算法全称是K近邻算法 (K-nearst neighbors,KNN) KNN是一种基本的机器学习算法,所谓...

  • 利用Python进行数字识别

    思路 通过Python实现KNN算法。而KNN算法就是K最近邻(k-Nearest Neighbor,KNN)分类...

  • 机器学习系列(六)——knn算法原理与scikit-learn底

    KNN算法 本篇将介绍knn算法,knn算法因为思想非常简单,运用的数学知识比较浅显,是非常适合机器学习入门的算法...

  • kNN算法

    一. kNN算法 kNN(k-NearestNeighbor),即k最近邻算法,是机器学习算法中最基础的入门算法。...

  • 机器学习笔记:K-近邻算法(KNN)

    一、介绍 KNN算法称为邻近算法,或者说K邻近算法(kNN,k-NearestNeighbor),分类算法。 KN...

  • 降维与度量学习

    1、kNN kNN算法即k近邻算法,是常用的有监督学习算法。它是懒惰学习的代表算法,没有显式的训练过程。kNN在收...

网友评论

    本文标题:KNN算法

    本文链接:https://www.haomeiwen.com/subject/vblghqtx.html