
2020 Self-Driving (7): Lane Line Detection

Author: zidea | Published 2020-09-17 20:36
    (Figure: tesla_001.jpg)

    Reading the image

    • Read the image with python-opencv
    • Display the image with pygame, as usual in this series
    from __future__ import print_function
    import os
    import cv2

    import pygame
    from pygame.locals import *

    class Display(object):
        def __init__(self, W, H):
            self.W = W
            self.H = H
            pygame.init()
            pygame.display.set_caption("self Drive")

            self.screen = pygame.display.set_mode((self.W, self.H), 0, 32)

        def paint(self, frame):
            self.screen.fill([0, 0, 0])

            # OpenCV frames are BGR while pygame expects RGB, and surfarray uses
            # (width, height) ordering, hence the transpose before blitting
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img = cv2.transpose(img)
            img = pygame.surfarray.make_surface(img)

            self.screen.blit(img, (0, 0))
            pygame.display.update()
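    A quick usage sketch of this Display helper with a single frame read by OpenCV (the image file name is just a placeholder, not a file shipped with the article):

    # hypothetical usage: read one image with OpenCV and show it with Display
    frame = cv2.imread("tesla_001.jpg")  # placeholder file name
    display = Display(frame.shape[1], frame.shape[0])
    display.paint(frame)
    pygame.time.wait(3000)  # keep the window up for a moment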
    
    
    
    # coding=utf-8
    import sys

    import numpy as np
    import cv2
    import glob
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg

    from display import Display
    import pygame
    from pygame.locals import *

    if __name__ == "__main__":

        cap = cv2.VideoCapture("project_video.mp4")

        Width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        Height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # the video is displayed at half resolution
        Width = Width // 2
        Height = Height // 2

        # source points picked on the original 1280x720 frame, halved to match
        # the resized frame
        src = np.float32([
            [678, 440], [1279, 720],
            [0, 720], [595, 440]]) // 2

        # destination points of the bird's-eye view, halved as well
        dst = np.float32([
            [1180, 0], [1180, 720],
            [100, 720], [100, 0]]) // 2

        display = Display(Width, Height)

        def sliding_windows_search(img):
            binary_warped = img.astype('uint8')
            return binary_warped

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            frame = cv2.resize(frame, (Width, Height))

            # warp the frame to a bird's-eye view
            M = cv2.getPerspectiveTransform(src, dst)
            img_size = (frame.shape[1], frame.shape[0])
            warped = cv2.warpPerspective(frame, M, img_size, flags=cv2.INTER_NEAREST)
            display.paint(warped)
            for event in pygame.event.get():
                if event.type == QUIT:
                    sys.exit()
                if event.type == pygame.KEYUP:
                    if event.key == ord('s'):
                        cv2.imwrite("wrapper_image_001.jpg", warped)
                    if event.key == ord('q'):
                        pygame.quit()
                        sys.exit()
        # M = cv2.getPerspectiveTransform(src, dst)
        # Minv = cv2.getPerspectiveTransform(dst, src)
    

    The bird's-eye view result

    This time the approach differs slightly from last time. Last time, after color and region selection we ran Canny edge detection on the resulting image and then applied a Hough transform on top of the Canny edges to detect the lane lines (a minimal sketch of that approach is included below for reference); this time we instead work on a bird's-eye (top-down) view of the road.

    (Figure: wrapper_image_001.jpg, the warped bird's-eye view)
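    For reference, here is a minimal sketch of that earlier Canny + Hough pipeline; the thresholds and the region-of-interest polygon are illustrative assumptions, not the exact values from the previous article:

    import cv2
    import numpy as np

    def canny_hough_lanes(frame):
        # grayscale + blur, then Canny edge detection (thresholds are assumed)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        edges = cv2.Canny(blur, 50, 150)

        # keep only a rough trapezoid in front of the car (assumed polygon)
        h, w = edges.shape
        roi = np.array([[(0, h), (w//2 - 50, h//2 + 50),
                         (w//2 + 50, h//2 + 50), (w, h)]], dtype=np.int32)
        mask = np.zeros_like(edges)
        cv2.fillPoly(mask, roi, 255)
        masked = cv2.bitwise_and(edges, mask)

        # probabilistic Hough transform to find line segments on the edge image
        lines = cv2.HoughLinesP(masked, rho=2, theta=np.pi/180, threshold=50,
                                minLineLength=40, maxLineGap=100)
        if lines is not None:
            for x1, y1, x2, y2 in lines[:, 0]:
                cv2.line(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
        return frame

    The bird's-eye view itself comes from the perspective transform already used in the main loop: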

    M = cv2.getPerspectiveTransform(src, dst)
    img_size = (frame.shape[1], frame.shape[0])
    warped = cv2.warpPerspective(frame, M, img_size, flags=cv2.INTER_NEAREST)
    

    The main point here is how lane detection is implemented with a sliding window. First we take the already processed grayscale image and convert its pixel type to uint8; in the second version below we also stack the single channel into three with np.dstack so that colored annotations (search windows, points) can be drawn on it later.

    def sliding_windows_search(img):
        binary_warped = img.astype('uint8')
        return binary_warped
    
    def sliding_windows_search(img):
        binary_warped = img.astype('uint8')
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))
        return out_img
    

    The HLS (Hue, Lightness, Saturation) color space

    • Hue: the basic attribute of a color, what we normally call its name, such as red or yellow.
    • Saturation: the purity of the color; the higher the value, the purer the color, while lower values fade toward gray. Ranges from 0 to 100%.
    • Lightness: the brightness of the color (L), also ranging from 0 to 100%.

    By converting the image into these color spaces and thresholding individual channels, we can isolate the lane markings:

    def color_thresh(img, s_thresh=(0, 255), v_thresh=(0, 255)):
        # convert to the HLS color space and extract (split off) the S channel
        hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        s_channel = hls[:, :, 2]
        s_binary = np.zeros_like(s_channel)
        s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

        # convert to the HSV color space and extract (split off) the V channel
        hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        v_channel = hsv[:, :, 2]
        v_binary = np.zeros_like(v_channel)
        v_binary[(v_channel >= v_thresh[0]) & (v_channel <= v_thresh[1])] = 1

        # keep only the pixels that pass both thresholds
        c_binary = np.zeros_like(s_channel)
        c_binary[(s_binary == 1) & (v_binary == 1)] = 1
        return c_binary
    

    Unlike last time, where we simply thresholded the three RGB channels separately, this time we change the color space and filter on the S (saturation) channel of HLS and the V (value) channel of HSV to extract the lane line positions. The key line is the one that combines both masks; a short sketch of plugging color_thresh into the main loop follows the snippet below.

    c_binary[(s_binary==1) & (v_binary==1)] = 1
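    A minimal sketch of wiring color_thresh into the main loop (the threshold ranges are assumed, untuned values; the mask is scaled to 0/255 so it can be displayed and handed to the sliding-window search as a uint8 image):

        # inside the while-loop of the main script, after computing `warped`
        warped_rgb = cv2.cvtColor(warped, cv2.COLOR_BGR2RGB)  # color_thresh expects RGB
        binary = color_thresh(warped_rgb, s_thresh=(120, 255), v_thresh=(80, 255))
        binary_255 = (binary * 255).astype('uint8')           # 0/255 mask as uint8
        out_img = sliding_windows_search(binary_255)
        display.paint(out_img)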
    

    Searching for the lane lines with a sliding window:

    def sliding_windows_search(img):
        # convert the input image type to uint8
        binary_warped = img.astype('uint8')
        # take the lower half of binary_warped and sum the pixel values of every column
        histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
        # stack the single channel into a 3-channel image so we can draw in color
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))

        # visualize the histogram by drawing one dot per column
        for xv, hv in enumerate(histogram):
            cv2.circle(out_img,(int(xv),int(hv//255)),1,(255,0,0),2)

        # midpoint of the histogram along the x axis
        midpoint = int(histogram.shape[0] / 2)

        return out_img
    
    • First convert the image to grayscale and its pixel type to uint8
    • Then compute a column-wise intensity histogram of the image
    • Compute the midpoint of the histogram
    midpoint = int(histogram.shape[0] / 2)

    # the columns with the largest sums on either side of the midpoint give the
    # starting x positions of the left and right lane lines
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    cv2.circle(out_img,(leftx_base,0),5,(0,255,0),5)
    cv2.circle(out_img,(rightx_base,0),5,(0,255,0),5)
    
    (Figure: histgram_screen_capture_01.jpg)

    In the figure, the two green points mark the starting positions of the lane lines detected from the column histogram.

    for xv, hv in enumerate(histogram):
        print(xv,hv)
        cv2.circle(out_img,(int(xv),int(hv//255)),1,(255,0,0),1)
    
    (Figure: histgram_screen_capture.jpg)

    numpy's nonzero() returns a tuple of index arrays, one per dimension, that locate every non-zero element; we will use it shortly to find which pixels fall inside each search window:
    a = np.array([
        [1,2,0],
        [2,0,1]
        ])
    b = a.nonzero()
    print(b)
    
    (array([0, 0, 1, 1]), array([0, 1, 0, 2]))
    
        # width of the search windows: +/- margin around the current center
        margin = 100
        # Set minimum number of pixels found to recenter window
        minpix = 50
        # Create empty lists to receive left and right lane pixel indices
        left_lane_inds = []
        right_lane_inds = []

        # number of windows stacked along y and their height (nwindows = 9 is an assumed value)
        nwindows = 9
        window_height = int(binary_warped.shape[0] / nwindows)
        # x and y coordinates of every non-zero pixel in the binary image
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # current window centers, initialised from the histogram peaks found above
        leftx_current = leftx_base
        rightx_current = rightx_base

        # Step through the windows one by one
        for window in range(nwindows):
            # Identify window boundaries in x and y (and right and left)
            win_y_low = binary_warped.shape[0] - (window+1)*window_height
            win_y_high = binary_warped.shape[0] - window*window_height
            win_xleft_low = leftx_current - margin
            win_xleft_high = leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin
            # visualize the search windows
            cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
            cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)

    The full version of the loop also collects, for every window, the indices of the non-zero pixels that fall inside it, and re-centers the next window on their mean x position once enough pixels have been found:

        for window in range(nwindows):
            # the lane lines move along the x (width) axis, so the image is split into
            # nwindows equal slices along y; these are the top and bottom of each slice
            win_y_low = binary_warped.shape[0] - (window+1)*window_height
            win_y_high = binary_warped.shape[0] - window*window_height

            # window x bounds: margin extends the search window to the left and right
            # of the current center
            win_xleft_low = leftx_current - margin
            win_xleft_high = leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin

            # draw every sliding window
            cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
            cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)

            # indices of all non-zero pixels that fall inside the window
            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]

            # store these indices in the corresponding list
            left_lane_inds.append(good_left_inds)
            right_lane_inds.append(good_right_inds)

            # If you found > minpix pixels, recenter next window on their mean position
            if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))
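    After the loop, pipelines of this style usually flatten the per-window index lists and recover the (x, y) pixel coordinates of each lane line. The sketch below is that customary follow-up, not code from this article, with a second-order polynomial fit as the usual final step:

        # not from the original article: flatten the index lists collected above
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)

        # recover the pixel coordinates of the left and right lane lines
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]

        # the customary next step: fit x = a*y^2 + b*y + c to each lane line
        left_fit = np.polyfit(lefty, leftx, 2)
        right_fit = np.polyfit(righty, rightx, 2)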
    
    
    A small standalone example of what np.sum along axis=0 (the column histogram) and np.dstack (stacking one channel into three) produce:

    img_array = np.array([[1,2,3],
        [2,3,1],
        [2,3,2]])
    print(img_array.shape)
    
    histogram = np.sum(img_array[:,:],axis=0)
    print(histogram)
    
    (3, 3)
    [5 8 6]
    
    img_col_arr = np.dstack((img_array,img_array,img_array))
    print(img_col_arr.shape)
    
    (3, 3, 3)
    
