I've been reading Li Hang's 《统计学习方法》 (Statistical Learning Methods) recently. Chapter 3 covers the k-nearest-neighbor algorithm: when N >> k, a linear scan over the training set is slow, so the training data can be organized in a kd-tree for more efficient search. I came across a blog post whose illustrated walkthrough of the idea is quite good, but the author's code has problems:
- The overall search logic is incorrect
- Visited nodes are never marked, so the search walks back over nodes it has already covered, recomputing distances and defeating the whole point of using a kd-tree
So I reworked that code into a correct version, with some debugging output added:
- Fixed the search logic (the key pruning test is sketched right after this list)
- Cleaned up the code
- Made it Python 3 compatible
- Added an up_traced flag so the upward search never walks back down the path it came from; the flags are reset after each query, so repeated searches on the same tree work correctly
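The heart of the upward pass is deciding whether the parent's other branch can be pruned. A minimal standalone sketch of that test, with illustrative names (`split_coord`, `query_coord`, `best_dist` are not identifiers from the code below):

```python
def must_check_other_side(split_coord, query_coord, best_dist):
    # The ball of radius best_dist around the query crosses the splitting
    # hyperplane exactly when the gap along the split axis is smaller than
    # best_dist; only then can the far subtree contain a closer point.
    return abs(split_coord - query_coord) < best_dist
```

Both coordinates must be taken along the parent's splitting dimension; mixing dimensions here silently breaks the pruning test.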
```python
# -*- coding: utf-8 -*-
import numpy as np


class Node:
    def __init__(self, data, parent, dim):
        self.data = data        # the training point stored at this node
        self.parent = parent
        self.lChild = None
        self.rChild = None
        self.dim = dim          # dimension this node splits on
        # marks nodes the upward search has already processed,
        # so __searchDown never walks back into them
        self.up_traced = False

    def setLChild(self, lChild):
        self.lChild = lChild

    def setRChild(self, rChild):
        self.rChild = rChild
class KdTree:
    def __init__(self, train):
        self.root = self.__build(train, 1, None)

    def __build(self, train, depth, parent):  # build the tree recursively
        (m, k) = train.shape
        if m == 0:
            return None
        # sort on the current splitting dimension and take the median as root
        train = train[train[:, depth % k].argsort()]
        root = Node(train[m//2], parent, depth % k)
        root.setLChild(self.__build(train[:m//2, :], depth+1, root))
        root.setRChild(self.__build(train[m//2+1:, :], depth+1, root))
        return root
    def findNearestPointAndDistance(self, point):  # find the training point nearest to `point`
        point = np.array(point)
        node = self.__findSmallestSubSpace(point, self.root)
        print("Start node:", node.data)
        result = self.__searchUp(point, node, node, np.linalg.norm(point - node.data))
        # __searchUp flagged exactly the ancestors of the start node;
        # reset them so the next query starts from a clean tree
        ancestor = node.parent
        while ancestor is not None:
            ancestor.up_traced = False
            ancestor = ancestor.parent
        return result
    def __searchUp(self, point, node, nearestPoint, nearestDistance):
        if node.parent is None:
            return [nearestPoint, nearestDistance]
        print("UP:", node.parent.data)
        # mark the parent as processed so __searchDown will not re-enter it
        node.parent.up_traced = True
        distance = np.linalg.norm(node.parent.data - point)
        if distance < nearestDistance:
            nearestDistance = distance
            nearestPoint = node.parent
        # distance from the target to the parent's splitting hyperplane, taken
        # along the parent's splitting dimension on both sides; only if the
        # current best ball crosses the hyperplane can the other branch help
        distance = np.abs(node.parent.data[node.parent.dim] - point[node.parent.dim])
        if distance < nearestDistance:
            [p, d] = self.__searchDown(point, node.parent)
            if d < nearestDistance:
                nearestDistance = d
                nearestPoint = p
        [p, d] = self.__searchUp(point, node.parent, nearestPoint, nearestDistance)
        if d < nearestDistance:
            nearestDistance = d
            nearestPoint = p
        return [nearestPoint, nearestDistance]
    def __searchDown(self, point, node):
        # exhaustively search this subtree, skipping children already
        # processed by the upward pass
        nearestDistance = np.linalg.norm(node.data - point)
        nearestPoint = node
        print("DOWN:", node.data)
        if node.lChild is not None and not node.lChild.up_traced:
            [p, d] = self.__searchDown(point, node.lChild)
            if d < nearestDistance:
                nearestDistance = d
                nearestPoint = p
        if node.rChild is not None and not node.rChild.up_traced:
            [p, d] = self.__searchDown(point, node.rChild)
            if d < nearestDistance:
                nearestDistance = d
                nearestPoint = p
        print("---- ", nearestPoint.data, nearestDistance)
        return [nearestPoint, nearestDistance]
    def __findSmallestSubSpace(self, point, node):  # find the smallest subspace containing the point
        """
        Starting from the root, descend the kd-tree recursively. If the
        point's coordinate in the current splitting dimension is smaller
        than the node's, move to the left child, otherwise to the right
        child, until the child on that side is missing.
        """
        if point[node.dim] < node.data[node.dim]:
            if node.lChild is None:
                return node
            else:
                return self.__findSmallestSubSpace(point, node.lChild)
        else:
            if node.rChild is None:
                return node
            else:
                return self.__findSmallestSubSpace(point, node.rChild)
# alternative training set, kept for reference (the assignment below overrides it)
# train = np.array([[2, 3], [5, 4], [9, 6], [4, 7], [8, 1], [7, 2]])
train = np.array([[2, 5], [3, 2], [3, 7], [8, 3], [6, 6], [1, 1], [1, 8]])
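# For this training set, __build produces the tree below (split dimension
# in parentheses; y is split first because depth starts at 1):
#
#                  [2 5] (y)
#                /          \
#          [3 2] (x)      [3 7] (x)
#          /     \         /     \
#       [1 1]  [8 3]    [1 8]  [6 6]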
kdTree = KdTree(train)
target = np.array([6, 4])
print('##### target :', target)
[p, d] = kdTree.findNearestPointAndDistance(target)
print(p.data, d)   # expected: [6 6] 2.0
print('---------------------')
# brute-force check: distance from every training point to the target
(m, k) = train.shape
for i in range(m):
    print(train[i], np.linalg.norm(train[i] - target))
# a second query on the same tree exercises the flag reset
target = np.array([2, 2])
print('')
print('##### target :', target)
[p, d] = kdTree.findNearestPointAndDistance(target)
print(p.data, d)   # expected: [3 2] 1.0
print('---------------------')
for i in range(m):
    print(train[i], np.linalg.norm(train[i] - target))
```
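As an independent sanity check, the same queries can be run through SciPy's kd-tree, assuming scipy is available (appended after the script above, so `train` is already defined):

```python
from scipy.spatial import cKDTree

checker = cKDTree(train)
for t in ([6, 4], [2, 2]):
    d, i = checker.query(t)  # distance to, and index of, the nearest neighbor
    print(t, '->', train[i], d)
```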