Extracting Faces for Analysis
We use the detector provided by MTCNN to detect the faces, but before the crops can be fed into VGGFace2 they need to be resized to the model's expected input size.
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from mtcnn.mtcnn import MTCNN

def extract_face_from_image(image_path, required_size=(224, 224)):
    # Load the image and detect the faces in it
    image = plt.imread(image_path)
    detector = MTCNN()
    faces = detector.detect_faces(image)

    face_images = []
    for face in faces:
        # Extract the face's bounding box
        x1, y1, width, height = face['box']
        x2, y2 = x1 + width, y1 + height

        # Crop the face from the image
        face_boundary = image[y1:y2, x1:x2]

        # Resize the crop to the required input size
        face_image = Image.fromarray(face_boundary)
        face_image = face_image.resize(required_size)
        face_array = np.asarray(face_image)
        face_images.append(face_array)

    return face_images

extracted_face = extract_face_from_image('face_dataset/ironman_actor.jpg')
plt.imshow(extracted_face[0])
plt.show()
[Output image: the extracted face from ironman_actor.jpg]
len(extracted_face)
1
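Because extract_face_from_image returns a list, an image containing several people yields one crop per detected face. Below is a minimal sketch of how you might display every crop; the group-photo path is a hypothetical placeholder, not part of the tutorial's dataset.

# Show every face detected in a single image, side by side.
# 'face_dataset/group_photo.jpg' is a hypothetical example path.
faces = extract_face_from_image('face_dataset/group_photo.jpg')

for i, face in enumerate(faces):
    plt.subplot(1, len(faces), i + 1)
    plt.imshow(face)
    plt.axis('off')
plt.show()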
In this section, we first test the model on the two Iron Man images we retrieved earlier.
from keras_vggface.utils import preprocess_input
from keras_vggface.vggface import VGGFace
from scipy.spatial.distance import cosine
Here we import three modules: preprocess_input to prepare the extracted faces for the face recognition model, VGGFace to build the model itself, and SciPy's cosine function to compute the distance between two faces.
[Test images: ironman_actor_01.jpg and ironman_actor.jpg]
The extraction function is redefined here so that it returns a single face array (the first detected face) instead of a list:

def extract_face_from_image(image_path, required_size=(224, 224)):
    # Load the image and detect the faces in it
    image = plt.imread(image_path)
    detector = MTCNN()
    faces = detector.detect_faces(image)

    for face in faces:
        # Extract the face's bounding box
        x1, y1, width, height = face['box']
        x2, y2 = x1 + width, y1 + height

        # Crop the face from the image
        face_boundary = image[y1:y2, x1:x2]

        # Resize the crop to the required input size
        face_image = Image.fromarray(face_boundary)
        face_image = face_image.resize(required_size)
        face_array = np.asarray(face_image)

        # Return only the first detected face as a single array
        return face_array
def get_model_scores(faces):
    samples = np.asarray(faces, 'float32')
    print(samples.shape)

    # Prepare the data for the model (VGGFace2 preprocessing)
    samples = preprocess_input(samples, version=2)
    print(samples)

    # Build the VGGFace model object (ResNet-50 backbone, average pooling)
    model = VGGFace(model='resnet50',
                    include_top=False,
                    input_shape=(224, 224, 3),
                    pooling='avg')

    # Run a forward pass and return the face embeddings
    return model.predict(samples)
faces = [extract_face_from_image(image_path)
         for image_path in ['face_dataset/ironman_actor_01.jpg', 'face_dataset/ironman_actor.jpg']]

model_scores = get_model_scores(faces)
(2, 224, 224, 3)
[[[[ -61.4953   -74.8827   -92.0912 ]
   [ -60.4953   -73.8827   -91.0912 ]
   ...
   [ -73.4953   -83.8827  -111.0912 ]]
  ...]
 [[[ -79.4953   -91.8827  -113.0912 ]
   ...
   [ -66.4953   -86.8827  -114.0912 ]]]]
(full preprocessed pixel array truncated for brevity)
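The repeating fractional parts in the printout (.4953, .8827, .0912) come from fixed per-channel means that preprocess_input with version=2 subtracts from the raw pixel values. Below is a minimal conceptual sketch of that mean-subtraction step, assuming those mean values; the library's actual implementation may also reorder the colour channels.

import numpy as np

# Assumed per-channel means for version=2 preprocessing, inferred from the
# fractional parts in the printout above (e.g. a pixel value of 30 becomes
# 30 - 91.4953 = -61.4953).
VGGFACE2_MEANS = np.array([91.4953, 103.8827, 131.0912], dtype='float32')

def approx_preprocess(samples):
    # Rough sketch of the mean-subtraction step only; not the library's exact code.
    samples = np.asarray(samples, 'float32')
    return samples - VGGFACE2_MEANS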
model_scores
array([[0. , 1.590952 , 5.945795 , ..., 0.06429956, 0.72699684,
0.40423754],
[0.17060472, 5.4521966 , 1.0741014 , ..., 0. , 1.3931723 ,
0.00671283]], dtype=float32)
Each row of model_scores is the embedding of one face. Finally, compute the cosine distance between the two embeddings; a distance of 0.4 or less is treated as a match:

if cosine(model_scores[0], model_scores[1]) <= 0.4:
    print("Faces Matched")  # the two faces match
Faces Matched
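If you want to compare more than two images, the same check can be wrapped in a small helper that reports the distance for every pair of embeddings. This is a hypothetical convenience function built on the code above, not part of the original tutorial.

from itertools import combinations

def report_matches(model_scores, threshold=0.4):
    # Compare every pair of embeddings; a cosine distance at or below the
    # threshold (0.4, mirroring the check above) counts as a match.
    for i, j in combinations(range(len(model_scores)), 2):
        distance = cosine(model_scores[i], model_scores[j])
        status = "match" if distance <= threshold else "no match"
        print(f"faces {i} and {j}: distance = {distance:.4f} -> {status}")

report_matches(model_scores)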