Visualizing the Network Model
Visualizing online with Netscope
Netscope
Netscope visualizes neural network architectures (technically, any directed acyclic graph). It currently supports Caffe prototxt files; the site is at ethereon.github.io/netscope/#/… Using it is simple: paste the contents of a prototxt file into Netscope's edit box and press Shift+Enter to get a visualization of the network structure. Netscope's strengths are that the rendered model is clean, and that hovering over any block of the rendered network on the right pops up that block's concrete parameters. Figure 1 takes the train.prototxt of the ZF model in Faster R-CNN as an example.
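To try it quickly, a minimal prototxt such as the following can be pasted into the edit box (a hypothetical two-layer snippet written for illustration, not taken from Faster R-CNN):

name: "TinyNet"
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape: { dim: 1 dim: 3 dim: 224 dim: 224 } }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param { num_output: 96 kernel_size: 7 stride: 2 }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}

Pressing Shift+Enter then renders the data, conv1 and relu1 nodes as a small graph.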
Visualizing image features
For visualizing image features, I have used two approaches:
1. modifying demo.py to dump intermediate layer outputs;
2. using the visualization tool deep-visualization-toolbox.
Modifying demo.py
This part follows the layer-by-layer feature visualization section of Xue Kaiyu's "Caffe study notes". Again taking the ZF network trained on Pascal VOC as the example, the modified demo.py looks like this:
#!/usr/bin/env python
#-*-coding:utf-8-*-
import matplotlib
matplotlib.use('Agg')
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'zf_faster_rcnn_iter_2000.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [zf]',
choices=NETS.keys(), default='zf')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
# Specify the caffe path; the one below is mine
caffe_root='/home/ouyang/GitRepository/py-faster-rcnn/caffe-fast-rcnn/'
# import sys
sys.path.insert(0, caffe_root+'python')
# import caffe
# Figure size 10 x 10; images are interpolated with nearest neighbor and shown with a gray colormap
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
image_file = caffe_root+'examples/images/vehicle_0000015.jpg'
# Path to the ImageNet mean file
npload = caffe_root+ 'python/caffe/imagenet/ilsvrc_2012_mean.npy'
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', np.load(npload).mean(1).mean(1))
# The reference model uses pixel values in 0~255 rather than 0~1
transformer.set_raw_scale('data', 255)
# The reference model expects BGR channel order, so swap the RGB channels loaded by caffe.io.load_image
transformer.set_channel_swap('data', (2,1,0))
im=caffe.io.load_image(image_file)
net.blobs['data'].reshape(1,3,224,224)
net.blobs['data'].data[...] = transformer.preprocess('data',im)
# Print each blob's name and shape: (batch size, number of feature maps, height, width)
print [(k,v.data.shape) for k,v in net.blobs.items()]
# Print the shapes of the learned network parameters
print [(k,v[0].data.shape) for k,v in net.params.items()]
def show_image(im):
    # helper for displaying an image
    if im.ndim == 3:
        im = im[:, :, ::-1]  # BGR -> RGB for display
    plt.imshow(im)
    plt.axis('off')  # hide the axes
    plt.show()
# Tile each filter / feature map on one square grid
def vis_square(data, padsize=1, padval=0):
    # normalize the data to [0, 1]
    data -= data.min()
    data /= data.max()
    # force the number of tiles to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
    # tile the individual maps into one big image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    #show_image(data)
    plt.imshow(data)
    # save before show(); with the Agg backend only savefig writes output
    # (set your own path here; as written, every call overwrites the same file)
    plt.savefig("./tools/Vehicle_2000/fc6.jpg")
    plt.show()
out = net.forward()
# the data blob was reshaped to batch size 1 above, so take index 0
image = net.blobs['data'].data[0].copy()
image -= image.min()
image /= image.max()
# Show the original image
show_image(image.transpose(1, 2, 0))
# conv1 kernels
filters = net.params['conv1'][0].data
vis_square(filters.transpose(0, 2, 3, 1))
# conv1 output: the first 96 feature maps
feat = net.blobs['conv1'].data[0, :96]
vis_square(feat, padval=1)
# conv2 kernels: the first 96 filters, each expanded over its 96 input channels
filters = net.params['conv2'][0].data
vis_square(filters[:96].reshape(96 ** 2, 5, 5))
# conv2 output: 256 feature maps
feat = net.blobs['conv2'].data[0]
vis_square(feat, padval=1)
# conv3 kernels: the first 256 filters over their 256 input channels
filters = net.params['conv3'][0].data
vis_square(filters[:256].reshape(256 ** 2, 3, 3))
# conv3 output: all 384 feature maps
feat = net.blobs['conv3'].data[0]
vis_square(feat, padval=0.5)
# conv4 kernels: all 384 filters over their 384 input channels
filters = net.params['conv4'][0].data
vis_square(filters[:384].reshape(384 ** 2, 3, 3))
# conv4 output: all 384 feature maps
feat = net.blobs['conv4'].data[0]
vis_square(feat, padval=0.5)
# conv5 kernels: 256 filters over 384 input channels (flatten, since 256*384 slices do not fit a 384**2 reshape)
filters = net.params['conv5'][0].data
vis_square(filters.reshape(-1, 3, 3))
# conv5 output: all 256 feature maps
feat = net.blobs['conv5'].data[0]
vis_square(feat, padval=0.5)
# Fifth pooling layer: fc6 is a 1-D blob that vis_square cannot tile, so visualize the
# pooled conv5 blob instead; its name ('pool5', 'roi_pool_conv5', ...) depends on the prototxt
feat = net.blobs['pool5'].data[0]
vis_square(feat, padval=1)
# Histogram of the fc6 outputs
feat = net.blobs['fc6'].data[0]
plt.subplot(2, 1, 1)
plt.plot(feat.flat)
plt.subplot(2, 1, 2)
_ = plt.hist(feat.flat[feat.flat > 0], bins=100)
# save before show(); with the Agg backend only savefig writes output
plt.savefig("fc6_zhifangtu.jpg")
plt.show()
# Histogram of the fc7 outputs
feat = net.blobs['fc7'].data[0]
plt.subplot(2, 1, 1)
plt.plot(feat.flat)
plt.subplot(2, 1, 2)
_ = plt.hist(feat.flat[feat.flat > 0], bins=100)
plt.savefig("fc7_zhifangtu.jpg")
plt.show()
# Look at the predicted labels (this readout follows the ImageNet classification example:
# it assumes a 'prob' blob and the ilsvrc12 label file, which a pure detection net may not have)
image_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'
labels = np.loadtxt(image_labels_filename, str, delimiter='\t')
top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
#print labels[top_k]
for i in np.arange(top_k.size):
    print top_k[i], labels[top_k[i]]
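Once the ZF model is in place, the modified script is run just like the stock demo, e.g. python tools/demo.py --net zf --gpu 0 (or with --cpu for CPU mode), and the feature figures end up at the savefig paths set above.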
Below are a few detection results.
Figure 3: Original image used for detection
Figure 4: conv1 kernel visualization
Figure 5: conv1 feature visualization
deep-visualization-toolbox
deep-visualization-toolbox is the source code accompanying a paper by Jason Yosinski on visualizing convolutional neural networks; interested readers can look at the paper itself (paper link). There is a video on Bilibili showing how to use the tool: www.bilibili.com/video/av740… The source code is on GitHub: github.com/yosinski/de… The repository contains the complete installation and configuration steps. Again taking the horse from Figure 2 as the example, here are a few result images.
Figure 6: ToolBox conv1 feature visualization
Figure 7: ToolBox conv2 feature visualization
The presentation is quite clean: in the column on the left, the top-left is the input image, the middle part shows the feature maps obtained by forward-propagating the image through the network, and the bottom-left is the corresponding feature visualization.
Visualizing the loss
Plotting the loss recorded during training helps judge whether the model's hyper-parameters are appropriate. When training a model with the Faster R-CNN network, the log file written during training stores the loss values for each stage, as shown in Figure 8. A short Python script that reads the iteration numbers and the desired loss values from the log and plots them is all it takes.
Figure 8: Training log of the model
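For reference, the solver log lines that the script below parses look roughly like this (the timestamp/PID prefix is an assumed example; only the "Iteration N, loss = X" part matters for the parsing):

I0413 19:46:23.123456  2718 solver.cpp:229] Iteration 1000, loss = 0.876543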
The loss-visualization code is given below:
#!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import math
import re
import pylab
from pylab import figure, show, legend
from mpl_toolkits.axes_grid1 import host_subplot
# Log file name (note: the encoding keyword requires Python 3; use io.open or drop it under Python 2)
fp = open('faster_rcnn_end2end_ZF_.txt.2018-04-13_19-46-23', 'r', encoding='UTF-8')
train_iterations = []
train_loss = []
test_iterations = []
#test_accuracy = []
for ln in fp:
    # get train_iterations and train_loss
    if '] Iteration ' in ln and 'loss = ' in ln:
        arr = re.findall(r'ion \b\d+\b,', ln)
        train_iterations.append(int(arr[0].strip(',')[4:]))
        train_loss.append(float(ln.strip().split(' = ')[-1]))
fp.close()
host = host_subplot(111)
plt.subplots_adjust(right=0.8)  # adjust the right boundary of the plot window
#par1 = host.twinx()
# set labels
host.set_xlabel("iterations")
host.set_ylabel("RPN loss")
#par1.set_ylabel("validation accuracy")
# plot curves
p1, = host.plot(train_iterations, train_loss, label="train RPN loss")
host.legend(loc=1)
# set label color
host.axis["left"].label.set_color(p1.get_color())
host.set_xlim([-1000, 60000])
host.set_ylim([0., 3.5])
plt.draw()
plt.show()
The resulting plot is shown below.
Figure 9: Loss visualization
Plotting PR curves
Reference: https://github.com/rbgirshick/py-faster-rcnn/issues/670
Adding a few lines to pascal_voc.py is enough.
1. At the top of the file:
import matplotlib.pyplot as plt
import pylab as pl
from sklearn.metrics import precision_recall_curve
from itertools import cycle
2. In the _do_python_eval function:
def _do_python_eval(self, output_dir='output'):
    annopath = os.path.join(
        self._devkit_path,
        'VOC' + self._year,
        'Annotations',
        '{:s}.xml')
    imagesetfile = os.path.join(
        self._devkit_path,
        'VOC' + self._year,
        'ImageSets',
        'Main',
        self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(self._year) < 2010 else False
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
        if cls == '__background__':
            continue
        filename = self._get_voc_results_file_template().format(cls)
        rec, prec, ap = voc_eval(
            filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
            use_07_metric=use_07_metric)
        aps += [ap]
        pl.plot(rec, prec, lw=2,
                label='Precision-recall curve of class {} (area = {:.4f})'
                      ''.format(cls, ap))
        print(('AP for {} = {:.4f}'.format(cls, ap)))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    pl.xlabel('Recall')
    pl.ylabel('Precision')
    plt.grid(True)
    pl.ylim([0.0, 1.05])
    pl.xlim([0.0, 1.0])
    pl.title('Precision-Recall')
    pl.legend(loc="upper right")
    plt.show()
    print(('Mean AP = {:.4f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print(('{:.3f}'.format(ap)))
    print(('{:.3f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
Then run test_net.py and you will get PR curves like the figure below. To compare several curves in one figure, save the rec and prec arrays first and plot them together afterwards.
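For the comparison case, a minimal sketch along the following lines works; it assumes the <class>_pr.pkl files written by _do_python_eval above, and the two class names are only examples:

#!/usr/bin/env python
import pickle
import matplotlib.pyplot as plt

# load the saved rec/prec arrays and overlay the curves
for cls in ['car', 'person']:  # example classes; use any that were evaluated
    with open('output/{}_pr.pkl'.format(cls), 'rb') as f:
        d = pickle.load(f)
    plt.plot(d['rec'], d['prec'], lw=2,
             label='{} (AP = {:.4f})'.format(cls, d['ap']))
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.grid(True)
plt.legend(loc='lower left')
plt.show()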