OpenCV's Face Detection Model: FaceDetectorYN
- 1. Official documentation
- 2. How to use
- 2.1 Download the model files and code from opencv_zoo
- 2.2 Overview of the downloaded files
- 2.3 Modified demo supporting video-file input (the original handles only images and the camera)
- 2.4 Results
## 1. Official documentation
https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html
FaceDetectorYN is a face detection class built into OpenCV, based on YuNet. It is a DNN-based face detector; the model can be downloaded from:
https://github.com/opencv/opencv_zoo/tree/master/models/face_detection_yunet
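For quick experiments the opencv_zoo wrapper is not strictly necessary: the class is exposed directly in the Python bindings (available since OpenCV 4.5.4). Below is a minimal sketch, assuming the ONNX model has been downloaded into the working directory and `face.jpg` is a hypothetical test image:

```python
import cv2 as cv

img = cv.imread("face.jpg")  # hypothetical test image
h, w, _ = img.shape

# Create the detector; arguments follow the documented create() signature:
# model path, config (empty for ONNX), input size (must match the image),
# score threshold, NMS threshold, and top_k boxes kept before NMS.
detector = cv.FaceDetectorYN.create(
    "face_detection_yunet_2023mar.onnx", "", (w, h), 0.7, 0.3, 5000)

# detect() returns (retval, faces); faces holds one 15-element row per face:
# x, y, w, h, five (x, y) landmark pairs, and a confidence score.
# It may be None when no face is found.
_, faces = detector.detect(img)
for face in (faces if faces is not None else []):
    x, y, bw, bh = face[:4].astype(int)
    cv.rectangle(img, (x, y), (x + bw, y + bh), (0, 255, 0), 2)

cv.imwrite("result.jpg", img)
```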
## 2. How to use
### 2.1 Download the model files and code from opencv_zoo
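The demo script, the `yunet.py` wrapper class, and the ONNX model all live under `models/face_detection_yunet` in the opencv_zoo repository linked above, so cloning the whole repository is the simplest route. Note that opencv_zoo stores model weights with Git LFS, so a plain clone may leave the `.onnx` file as a small pointer stub until the LFS objects are pulled.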
### 2.2 Overview of the downloaded files
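At minimum, the directory should contain the following files (names as of the March 2023 model release):

- face_detection_yunet_2023mar.onnx: the YuNet model weights
- yunet.py: the YuNet wrapper class that the demo imports
- demo.py: the demo script, modified in the next section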
### 2.3 Modified demo supporting video-file input (the original handles only images and the camera)
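Compared with the upstream demo, the main change is the new `--camera_or_video` / `-c` argument: when `--input` is omitted, its value is passed to `cv.VideoCapture`, which opens a camera if the value is a digit (device index) and a video file otherwise. Each annotated frame is also written to `output.mp4` through `cv.VideoWriter`. The full modified script is shown below.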
```python
# This file is part of OpenCV Zoo project.
# It is subject to the license terms in the LICENSE file found in the same directory.
#
# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
# Third party copyrights are property of their respective owners.

import argparse

import numpy as np
import cv2 as cv

# Check OpenCV version
opencv_python_version = lambda str_version: tuple(map(int, (str_version.split("."))))
assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \
    "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python"

from yunet import YuNet

# Valid combinations of backends and targets
backend_target_pairs = [
    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA],
    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA_FP16],
    [cv.dnn.DNN_BACKEND_TIMVX,  cv.dnn.DNN_TARGET_NPU],
    [cv.dnn.DNN_BACKEND_CANN,   cv.dnn.DNN_TARGET_NPU]
]

parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
parser.add_argument('--input', '-i', type=str,
                    help='Usage: Set input to a certain image, omit if using camera.')
parser.add_argument('--model', '-m', type=str, default='face_detection_yunet_2023mar.onnx',
                    help="Usage: Set model type, defaults to 'face_detection_yunet_2023mar.onnx'.")
parser.add_argument('--backend_target', '-bt', type=int, default=0,
                    help='''Choose one of the backend-target pairs to run this demo:
                        {:d}: (default) OpenCV implementation + CPU,
                        {:d}: CUDA + GPU (CUDA),
                        {:d}: CUDA + GPU (CUDA FP16),
                        {:d}: TIM-VX + NPU,
                        {:d}: CANN + NPU
                    '''.format(*[x for x in range(len(backend_target_pairs))]))
parser.add_argument('--conf_threshold', type=float, default=0.7,
                    help='Usage: Set the minimum needed confidence for the model to identify a face, defaults to 0.7. Filter out faces of confidence < conf_threshold.')
parser.add_argument('--nms_threshold', type=float, default=0.3,
                    help='Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3.')
parser.add_argument('--top_k', type=int, default=5000,
                    help='Usage: Keep top_k bounding boxes before NMS.')
parser.add_argument('--save', '-s', action='store_true',
                    help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.')
parser.add_argument('--vis', '-v', action='store_true',
                    help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
parser.add_argument('--camera_or_video', '-c', default='123.mov',
                    help='Usage: Specify a camera device index or a video file path to open.')
args = parser.parse_args()


def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 255, 255), fps=None):
    output = image.copy()
    landmark_color = [
        (0,   0, 255),  # right eye
        (0,   0, 255),  # left eye
        (0, 255,   0),  # nose tip
        (255, 0, 255),  # right mouth corner
        (0, 255, 255)   # left mouth corner
    ]

    if fps is not None:
        cv.putText(output, 'FPS: {:.2f}'.format(fps), (50, 50), cv.FONT_HERSHEY_SIMPLEX, 1.5, text_color)

    for det in results:
        bbox = det[0:4].astype(np.int32)
        cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), box_color, 2)

        conf = det[-1]
        cv.putText(output, '{:.4f}'.format(conf), (bbox[0], bbox[1] + bbox[3] // 2), cv.FONT_HERSHEY_DUPLEX, 1.5, text_color)

        landmarks = det[4:14].astype(np.int32).reshape((5, 2))
        for idx, landmark in enumerate(landmarks):
            cv.circle(output, landmark, 4, landmark_color[idx], 10)

    return output


if __name__ == '__main__':
    backend_id = backend_target_pairs[args.backend_target][0]
    target_id = backend_target_pairs[args.backend_target][1]

    # Instantiate YuNet
    model = YuNet(modelPath=args.model,
                  inputSize=[320, 320],
                  confThreshold=args.conf_threshold,
                  nmsThreshold=args.nms_threshold,
                  topK=args.top_k,
                  backendId=backend_id,
                  targetId=target_id)

    # If input is an image
    if args.input is not None:
        image = cv.imread(args.input)
        h, w, _ = image.shape

        # Inference
        model.setInputSize([w, h])
        results = model.infer(image)

        # Print results
        print('{} faces detected.'.format(results.shape[0]))
        for idx, det in enumerate(results):
            print('{}: {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}'.format(
                idx, *det[:-1]))

        # Draw results on the input image
        image = visualize(image, results)

        # Save results if save is true
        if args.save:
            print('Results saved to result.jpg\n')
            cv.imwrite('result.jpg', image)

        # Visualize results in a new window
        if args.vis:
            cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
            cv.imshow(args.input, image)
            cv.waitKey(0)
    else:  # Omit --input to open a camera or a video file
        deviceId = args.camera_or_video
        cap = cv.VideoCapture(int(deviceId) if deviceId.isdigit() else deviceId)
        w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
        model.setInputSize([w, h])
        fps = int(cap.get(cv.CAP_PROP_FPS))

        # Define the video codec and create the VideoWriter object
        fourcc = cv.VideoWriter_fourcc(*'mp4v')  # or use 'XVID'
        out = cv.VideoWriter('output.mp4', fourcc, fps, (w, h))

        tm = cv.TickMeter()
        while cv.waitKey(1) < 0:
            # Inference
            tm.start()
            hasFrame, frame = cap.read()
            if not hasFrame:
                print('No frames grabbed!')
                break

            results = model.infer(frame)  # N x 15 array of detections
            tm.stop()

            # Draw results on the input frame
            frame = visualize(frame, results, fps=tm.getFPS())

            # Visualize results in a new window
            cv.imshow('YuNet face detection', frame)
            # tm.reset()

            # Write the annotated frame to the output video file
            out.write(frame)

        out.release()
        cap.release()
        cv.destroyAllWindows()
```
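With this change, `python demo.py -c 123.mov` detects faces in a video file and writes the annotated frames to `output.mp4`, while `python demo.py -c 0` opens the first camera instead. The original single-image behavior is unchanged, e.g. `python demo.py -i test.jpg -v -s` (where `test.jpg` is a hypothetical input image).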
### 2.4 Results
[Result video: 吕一_faces_detection]