Face Recognition with OpenCV Data Preprocessing and a Neural Network
import numpy as np
import scipy.io as sio
import tensorflow as tf
# 1) Load the data
f = open('Yale_64x64.mat', 'rb') # open the Yale face dataset stored as a MATLAB .mat file
mdict = sio.loadmat(f) # scipy.io reads the MATLAB file into a dict
train_data = mdict['fea'] # the feature field: one flattened image per row
train_label = mdict['gnd'] # the label field: subject id per image
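# Yale 64x64 contains 165 images of 15 subjects, each flattened to 4096 (= 64 * 64)
# pixels; a quick sanity check (a sketch, assuming the standard file layout):
assert train_data.shape == (165, 4096)
assert train_label.shape == (165, 1)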
# Shuffle the training data and labels, then take part of them as test data and labels.
# The same seed must be set before both permutations so data and labels stay paired.
np.random.seed(50)
train_data = np.random.permutation(train_data)
np.random.seed(50)
train_label = np.random.permutation(train_label)
test_data = train_data[0 : 64]
test_label = train_label[0 : 64]
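# Note: these 64 test images are drawn from (and remain in) the training set, so the
# accuracy reported below measures training fit rather than true generalization.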
print("start0")
# Permute the test data and labels with the same seed so they remain aligned
np.random.seed(100)
test_data = np.random.permutation(test_data)
np.random.seed(100)
test_label = np.random.permutation(test_label)
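# A less error-prone alternative to the paired-seed trick: one shared permutation
# index keeps the rows of both arrays aligned (a sketch, not called in this script):
def shuffle_together(data, labels):
    idx = np.random.permutation(data.shape[0])  # one index permutation for both arrays
    return data[idx], labels[idx]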
# 2) Build the training and test tensors
# Reshape the training data to N x 64 x 64 x 1, cast to np.float32, and normalize to [0, 1] ( / 255)
train_data = train_data.reshape(train_data.shape[0], 64, 64, 1).astype(np.float32) / 255
train_labels_new = np.zeros((165, 15)) # one-hot labels: 165 images of 15 subjects
for i in range(0, 165): # iterate over the 165 images
    j = int(train_label[i, 0]) - 1 # subject index for image i (labels are 1-based)
    train_labels_new[i, j] = 1 # image i belongs to subject j
test_data_input = test_data.reshape(test_data.shape[0], 64, 64, 1).astype(np.float32) / 255
test_labels_input = np.zeros((64, 15)) # one-hot labels for the 64 test images
for i in range(0, 64):
    j = int(test_label[i, 0]) - 1
    test_labels_input[i, j] = 1
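# The two loops above build the one-hot matrices by hand; an equivalent vectorized
# construction (a sketch, assuming 1-based integer labels as in this dataset):
# test_labels_input = np.eye(15)[test_label[:, 0].astype(int) - 1]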
# 3) CNN design and accuracy computation, using tf.layers
data_input = tf.placeholder(tf.float32, [None, 64, 64, 1]) # placeholder for the input images
label_input = tf.placeholder(tf.float32, [None, 15]) # placeholder for the one-hot labels
print("start")
layer1 = tf.layers.conv2d(inputs = data_input, filters = 32, kernel_size = 2,
                          strides = 1, padding = 'SAME', activation = tf.nn.relu)
layer1_pool = tf.layers.max_pooling2d(layer1, pool_size = 2, strides = 2) # strides = 2 halves both height and width
layer2 = tf.reshape(layer1_pool, [-1, 32 * 32 * 32]) # flatten the N x 32 x 32 x 32 feature maps to N x 32768
layer2_relu = tf.layers.dense(layer2, 1024, tf.nn.relu) # fully connected layer
output = tf.layers.dense(layer2_relu, 15) # 15 output logits, one per subject
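# Shape trace: N x 64 x 64 x 1 -> conv2d (2x2 kernel, stride 1, SAME) N x 64 x 64 x 32
# -> max_pool (2, 2) N x 32 x 32 x 32 -> flatten N x 32768 -> dense N x 1024 -> logits N x 15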
# Loss function; onehot_labels: exactly one entry is 1 and the rest are 0
loss = tf.losses.softmax_cross_entropy(onehot_labels = label_input, logits = output)
# Gradient descent with a learning rate of 0.01, to reduce the loss
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# Compute accuracy (TP + TN) / total; take the second returned value
accuracy = tf.metrics.accuracy(labels = tf.argmax(label_input, axis = 1),
predictions = tf.argmax(output, axis = 1))[1]
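# tf.metrics.accuracy is a streaming metric: it accumulates total/count in TF local
# variables, which is why tf.local_variables_initializer() is included below.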
# 4) Run training and report accuracy
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session() as sess:
    sess.run(init) # initialize the variables
    for i in range(0, 200):
        train_data_input = np.array(train_data)
        train_labels_input = np.array(train_labels_new)
        sess.run([train, loss], feed_dict = {data_input : train_data_input, label_input : train_labels_input})
        acc = sess.run(accuracy, feed_dict = {data_input : test_data_input, label_input : test_labels_input})
        # print the accuracy to two decimal places
        print('acc : %.2f' % acc)
print("END")
The accuracy converges over training as follows:
(figure: accuracy convergence curve)