The raw log file we started with looks like this:
#FrameTime - 20200212-090442_3268251#ExpoCoarseIntegrationTime - 973#ExpoAnalogRedGain - 75#ExpoAnalogGreen1Gain - 75#ExpoAnalogGreen2Gain - 75#ExpoAnalogBlueGain - 23070#LogAverageLuma - 2794#Brightness - 1702
#FrameTime - 20200212-090442_3268278#ExpoCoarseIntegrationTime - 973#ExpoAnalogRedGain - 75#ExpoAnalogGreen1Gain - 75#ExpoAnalogGreen2Gain - 75#ExpoAnalogBlueGain - 27328#LogAverageLuma - 2810#Brightness - 1718
#FrameTime - 20200212-090443_3268304#ExpoCoarseIntegrationTime - 973#ExpoAnalogRedGain - 75#ExpoAnalogGreen1Gain - 75#ExpoAnalogGreen2Gain - 75#ExpoAnalogBlueGain - 30392#LogAverageLuma - 2792#Brightness - 1700
#FrameTime - 20200212-090443_3268301#ExpoCoarseIntegrationTime - 973#ExpoAnalogRedGain - 75#ExpoAnalogGreen1Gain - 75#ExpoAnalogGreen2Gain - 75#ExpoAnalogBlueGain - 27780#LogAverageLuma - 2793#Brightness - 1701
#FrameTime - 20200212-090443_3268328#ExpoCoarseIntegrationTime - 973#ExpoAnalogRedGain - 75#ExpoAnalogGreen1Gain - 75#ExpoAnalogGreen2Gain - 75#ExpoAnalogBlueGain - 26550#LogAverageLuma - 2810#Brightness - 1718
First, we extract the data line by line.
Since each field name is followed by a '-', we split each line on that character to pull the values apart.
num, num1, num2, num3, num4, num5, num6, num7 = [], [], [], [], [], [], [], []
for line in open(logFile):  # logFile is the path to the log shown above
    fields = line.split('-')
    if fields[0] == '#FrameTime ':
        num.append(fields[2].split('_')[0])    # time (HHMMSS)
        num1.append(fields[3].split('#')[0])   # ExpoCoarseIntegrationTime
        num2.append(fields[8].split('#')[0])   # LogAverageLuma
        num3.append(fields[9].split('\n')[0])  # Brightness
        num4.append(fields[7].split('#')[0])   # ExpoAnalogBlueGain
        num5.append(fields[6].split('#')[0])   # ExpoAnalogGreen2Gain
        num6.append(fields[5].split('#')[0])   # ExpoAnalogGreen1Gain
        num7.append(fields[4].split('#')[0])   # ExpoAnalogRedGain
Because the first few lines of the log are not data, we added a filter condition on the first field,
and stored each field in its own list.
Since everything extracted is still a string,
we convert it all to int to make later processing easier.
num = [int(x) for x in num]
num1 = [int(x) for x in num1]
num2 = [int(x) for x in num2]
num3 = [int(x) for x in num3]
num4 = [int(x) for x in num4]
num5 = [int(x) for x in num5]
num6 = [int(x) for x in num6]
num7 = [int(x) for x in num7]
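As an aside, splitting on '-' is brittle because it depends on the exact field order. Since the log embeds the field names themselves, a regex-based parse is a less fragile alternative. A minimal sketch, not the approach used in this post:

import re

# Grab every "#Name - value" pair, regardless of field order.
pattern = re.compile(r'#(\w+) - ([^#\n]+)')
for line in open(logFile):
    pairs = dict(pattern.findall(line))
    if 'FrameTime' in pairs:  # same filter: skip non-data lines
        brightness = int(pairs['Brightness'])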
The extracted data is raw and quite noisy, so we need to deal with the noise first.
Here we start by running the data through a median filter.
import numpy as np

def median(lists):
    # 5-point sliding median, skipping the first and last two samples.
    # Note: this filters in place, so earlier outputs feed later windows.
    for i, x in enumerate(lists):
        if 2 <= i <= len(lists) - 3:
            lists[i] = np.median([lists[i-2], lists[i-1], lists[i], lists[i+1], lists[i+2]])
    return lists

data = [num1, num2, num3, num4, num5, num6, num7]
for x in data:
    median(x)  # median() mutates its argument, so no reassignment is needed
This applies the median filter to every series.
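A quick sanity check of median() on a toy series (made-up values) shows the isolated spike being smoothed away:

demo = [10, 10, 11, 90, 11, 10, 10]
print(median(demo))  # -> [10, 10, 11.0, 11.0, 11.0, 10, 10], the spike at index 3 is gone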
After median filtering the noise is already much reduced, but we still pass the data through a Kalman filter.
import copy
import cv2

class vpTrackKalman(object):
    def __init__(self, statePre0, errCovPost0, procNoiseCov, measureNoiseCov):
        # filter parameters
        self.kalman = cv2.KalmanFilter(2, 1)
        self.kalman.measurementMatrix = np.array([[1, 0]], np.float32)         # measurement matrix
        self.kalman.transitionMatrix = np.array([[1, 1], [0, 1]], np.float32)  # state transition matrix
        self.kalman.processNoiseCov = procNoiseCov                             # process noise
        self.kalman.measurementNoiseCov = measureNoiseCov                      # measurement noise
        # initial state
        self.kalman.statePre = copy.deepcopy(statePre0)                        # predicted state
        self.kalman.errorCovPost = copy.deepcopy(errCovPost0)                  # corrected (a posteriori) error covariance
        self.predicted = None
        self.corrected = None

    def update(self, stateMeasured):
        self.corrected = self.kalman.correct(stateMeasured)
        self.predicted = self.kalman.predict()

    def getCorrected(self):
        return self.corrected

    def getPredicted(self):
        return self.predicted

    def getState(self):
        return self.kalman.statePre, self.kalman.statePost  # predicted state, corrected state
statePre0 = np.float32(np.array([[380], [0]]))        # initial predicted state [position, velocity]
errCovPost0 = np.float32(np.diag([20, 10]))           # corrected error covariance (must be 2x2, not 2x1)
procNoiseCov = np.float32(0.01 * np.eye(2))           # process noise
measureNoiseCov = np.array([[10]], dtype=np.float32)  # measurement noise

vpKalman = vpTrackKalman(statePre0, errCovPost0, procNoiseCov, measureNoiseCov)
res = []
res_brightness = []
for i in range(len(num7)):
    vpKalman.update(np.array([[num7[i]]], np.float32))  # measurement must be a 1x1 column vector
    vpYTracked = vpKalman.getCorrected()
    res.append(vpYTracked)
for i in res:
    res_brightness.append(float(i[0]))  # keep only the position component, as a scalar
Here we apply the Kalman filter to series 7 (num7). First we set up the initial predicted state, the corrected error covariance, the process noise, and the measurement noise.
After defining the filter class, we instantiate it with these initial values.
Then we repeatedly feed in each measurement to be filtered and collect the corrected results one by one.
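For intuition, one predict/correct cycle of the same constant-velocity model can be written in a few lines of plain numpy. This is a sketch of the underlying math, equivalent in spirit to what cv2.KalmanFilter does internally, not the code used above:

import numpy as np

F = np.array([[1., 1.], [0., 1.]])  # transition: position += velocity
H = np.array([[1., 0.]])            # we only measure position
Q = 0.01 * np.eye(2)                # process noise
R = np.array([[10.]])               # measurement noise

def kalman_step(x, P, z):
    # predict
    x = F @ x
    P = F @ P @ F.T + Q
    # correct
    K = P @ H.T @ np.linalg.inv(H @ P @ H.T + R)  # Kalman gain
    x = x + K @ (z - H @ x)
    P = (np.eye(2) - K @ H) @ P
    return x, P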
import matplotlib.pyplot as plt

plt.subplot(2, 5, 6)
x_value = num[9696:9808]
y_value = res_brightness[9696:9808]
# x_value = num
# y_value = num3
plt.scatter(x_value, y_value, c='r', s=4)
plt.subplot(2, 5, 7)
x_value = num[566:829]
y_value = res_brightness[566:829]
plt.scatter(x_value, y_value, c='r', s=4)
plt.subplot(2, 5, 8)
x_value = num[6566:6705]
y_value = res_brightness[6566:6705]
plt.scatter(x_value, y_value, c='r', s=4)
plt.subplot(2, 5, 9)
x_value = num[6855:7065]
y_value = res_brightness[6855:7065]
plt.scatter(x_value, y_value, c='r', s=4)
plt.subplot(2, 5, 10)
x_value = num[4435:5020]
y_value = res_brightness[4435:5020]
plt.scatter(x_value, y_value, c='r', s=4)  # 'r' means red
plt.show()
Next we plot the processed data; the improvement is obvious. I laid out 10 subplots in a 2x5 grid for side-by-side comparison.
Everything is rendered as scatter plots, color-coded: processed data in red, unprocessed in blue. The listing above shows only the five red subplots.
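A sketch of what the five blue subplots likely looked like, assuming the raw counterpart is num7, the series that was actually filtered (the commented-out lines above hint that num3 may also have been compared):

segments = [(9696, 9808), (566, 829), (6566, 6705), (6855, 7065), (4435, 5020)]
for pos, (lo, hi) in enumerate(segments, start=1):
    plt.subplot(2, 5, pos)
    plt.scatter(num[lo:hi], num7[lo:hi], c='b', s=4)  # 'b' means blue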
Next, to make selection and filtering easier, I convert the data into pandas DataFrame format.
import pandas as pd

## Organize the data for each time segment
test_in_garage = {'time': num[9696:9808], 'brightness': num3[9696:9808],
                  'ExpoCoarseIntegrationTime': num1[9696:9808], 'LogAverageLuma': num2[9696:9808],
                  'ExpoAnalogBlueGain': num4[9696:9808], 'ExpoAnalogGreen2Gain': num5[9696:9808],
                  'ExpoAnalogRedGain': num7[9696:9808]}
test_in_garage_df = pd.DataFrame(test_in_garage)      # entering the garage
test_out_garage = {'time': num[566:829], 'brightness': num3[566:829],
                   'ExpoCoarseIntegrationTime': num1[566:829], 'LogAverageLuma': num2[566:829],
                   'ExpoAnalogBlueGain': num4[566:829], 'ExpoAnalogGreen2Gain': num5[566:829],
                   'ExpoAnalogRedGain': num7[566:829]}
test_out_garage_df = pd.DataFrame(test_out_garage)    # exiting the garage
test_in_tunnel = {'time': num[6566:6705], 'brightness': num3[6566:6705],
                  'ExpoCoarseIntegrationTime': num1[6566:6705], 'LogAverageLuma': num2[6566:6705],
                  'ExpoAnalogBlueGain': num4[6566:6705], 'ExpoAnalogGreen2Gain': num5[6566:6705],
                  'ExpoAnalogRedGain': num7[6566:6705]}
test_in_tunnel_df = pd.DataFrame(test_in_tunnel)      # entering the tunnel
test_out_tunnel = {'time': num[6855:7065], 'brightness': num3[6855:7065],
                   'ExpoCoarseIntegrationTime': num1[6855:7065], 'LogAverageLuma': num2[6855:7065],
                   'ExpoAnalogBlueGain': num4[6855:7065], 'ExpoAnalogGreen2Gain': num5[6855:7065],
                   'ExpoAnalogRedGain': num7[6855:7065]}
test_out_tunnel_df = pd.DataFrame(test_out_tunnel)    # exiting the tunnel
test_in_out_tunnel = {'time': num[4435:5020], 'brightness': num3[4435:5020],
                      'ExpoCoarseIntegrationTime': num1[4435:5020], 'LogAverageLuma': num2[4435:5020],
                      'ExpoAnalogBlueGain': num4[4435:5020], 'ExpoAnalogGreen2Gain': num5[4435:5020],
                      'ExpoAnalogRedGain': num7[4435:5020]}
test_in_out_tunnel_df = pd.DataFrame(test_in_out_tunnel)  # entering and exiting the tunnel
The seven data series are organized by time segment into five DataFrames.
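With the data in DataFrame form, per-segment inspection becomes a one-liner. For example, the 3000/200 thresholds from the judgment rule used in the video step below can be checked directly:

# Per-segment summary statistics
print(test_in_garage_df.describe())

# Rows matching the "open" condition applied in the video step
mask = (test_in_garage_df['brightness'] < 3000) & \
       (test_in_garage_df['ExpoCoarseIntegrationTime'] > 200)
print(test_in_garage_df[mask])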
Next, our task is to read the video and dynamically burn the judgment result into it as subtitles.
The overall approach: the video is extracted frame by frame; comparing the number of log entries against the frame count gave a ratio of about 1:8.5, so I used 1:9.
Then the needed information is written onto each frame,
and the annotated frames are saved back out as a new video file.
import cv2 as cv

videoCapture = cv.VideoCapture('/Users/qiao/Desktop/领目科技/数据/进车库_20200213_095159_06.mp4_20200213_132513.mp4')  # open the video
fps = videoCapture.get(cv.CAP_PROP_FPS)  # frame rate
print('fps', fps)
size = (int(videoCapture.get(cv.CAP_PROP_FRAME_WIDTH)),
        int(videoCapture.get(cv.CAP_PROP_FRAME_HEIGHT)))  # frame size
videoWrite = cv.VideoWriter(r"/Users/qiao/Desktop/领目科技/数据/img/1.avi",
                            cv.VideoWriter_fourcc("I", "4", "2", "0"), fps, size)  # write out as .avi
success, frame = videoCapture.read()  # read one frame
timeF = 9   # one log entry for roughly every 9 video frames
c = 0       # frame counter
i = 0       # log-entry index
textPitch = 'close'
while success:
    if c % timeF == 0:  # step to the next log entry every 9 frames
        # the judgment rule
        if (test_in_garage_df['brightness'][i] < 3000 and
                test_in_garage_df['ExpoCoarseIntegrationTime'][i] > 200):
            textPitch = "open"
        else:
            textPitch = 'close'
        i = i + 1
    frame = cv.putText(frame, textPitch, (50, 50), cv.FONT_HERSHEY_SIMPLEX,
                       1.2, (255, 255, 255), 2)  # draw the decision onto the frame
    cv.imshow("zd1", frame)
    cv.waitKey(int(1000 / fps))  # 1000 ms / frame rate
    videoWrite.write(frame)  # write the frame
    c = c + 1
    success, frame = videoCapture.read()  # grab the next frame
print(c)
videoCapture.release()
videoWrite.release()
cv.destroyAllWindows()
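One design note: the "I420" fourcc writes nearly raw YUV 4:2:0 into the AVI container, which makes large files and is not available in every OpenCV build. If VideoWriter silently produces an empty file, MJPG is a common fallback; a sketch:

videoWrite = cv.VideoWriter(r"/Users/qiao/Desktop/领目科技/数据/img/1.avi",
                            cv.VideoWriter_fourcc(*"MJPG"), fps, size)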
(Screenshot of the processed video)