Wil*_*jay · 39 · avfoundation, face-detection, ios, swift
How can I do real-time face detection the way the built-in Camera app does?
I noticed that AVCaptureStillImageOutput is deprecated after iOS 10.0, so I am using AVCapturePhotoOutput instead. However, the face detection results on the images I save with it are not very satisfactory. Any ideas?
UPDATE
After trying what @Shravya Boggarapu mentioned, I currently use AVCaptureMetadataOutput to detect faces, without CIFaceDetector. It works as expected. However, when I try to draw the bounds of a face, the rectangle appears mislocated. Any idea why?
let metaDataOutput = AVCaptureMetadataOutput()

captureSession.sessionPreset = AVCaptureSessionPresetPhoto
let backCamera = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera,
                                               mediaType: AVMediaTypeVideo,
                                               position: .back)
do {
    let input = try AVCaptureDeviceInput(device: backCamera)
    if captureSession.canAddInput(input) {
        captureSession.addInput(input)
        // MetadataOutput instead
        if captureSession.canAddOutput(metaDataOutput) {
            captureSession.addOutput(metaDataOutput)

            metaDataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            metaDataOutput.metadataObjectTypes = [AVMetadataObjectTypeFace]

            previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            previewLayer?.frame = cameraView.bounds
            previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
            cameraView.layer.addSublayer(previewLayer!)

            captureSession.startRunning()
        }
    }
} catch {
    print(error.localizedDescription)
}
and
extension CameraViewController: AVCaptureMetadataOutputObjectsDelegate {
    func captureOutput(_ captureOutput: AVCaptureOutput!,
                       didOutputMetadataObjects metadataObjects: [Any]!,
                       from connection: AVCaptureConnection!) {
        if findFaceControl {
            findFaceControl = false
            for metadataObject in metadataObjects {
                if (metadataObject as AnyObject).type == AVMetadataObjectTypeFace {
                    print("")
                    print(metadataObject)
                    let bounds = (metadataObject as! AVMetadataFaceObject).bounds
                    print("origin x: \(bounds.origin.x)")
                    print("origin y: \(bounds.origin.y)")
                    print("size width: \(bounds.size.width)")
                    print("size height: \(bounds.size.height)")
                    print("cameraView width: \(self.cameraView.frame.width)")
                    print("cameraView height: \(self.cameraView.frame.height)")
                    var face = CGRect()
                    face.origin.x = bounds.origin.x * self.cameraView.frame.width
                    face.origin.y = bounds.origin.y * self.cameraView.frame.height
                    face.size.width = bounds.size.width * self.cameraView.frame.width
                    face.size.height = bounds.size.height * self.cameraView.frame.height
                    print(face)
                    showBounds(at: face)
                }
            }
        }
    }
}
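One possible reason the rectangle looks mislocated: AVMetadataFaceObject.bounds is normalized and expressed in the metadata output's coordinate space, which is rotated relative to the preview, so multiplying it directly by the view's width and height does not line up in general. A minimal sketch of an alternative (my assumption, reusing the previewLayer and showBounds(at:) from the code above) is to let the preview layer do the conversion with transformedMetadataObject(for:):

// Inside captureOutput(_:didOutputMetadataObjects:from:) above:
for case let faceObject as AVMetadataFaceObject in metadataObjects {
    // Convert from the metadata output's normalized, rotated coordinates
    // into the preview layer's coordinate space.
    guard let converted = previewLayer?.transformedMetadataObject(for: faceObject) else { continue }
    // converted.bounds is now in the preview layer's coordinates, so it can be
    // drawn on cameraView (the view hosting the preview layer).
    showBounds(at: converted.bounds)
}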
Original
var captureSession = AVCaptureSession()
var photoOutput = AVCapturePhotoOutput()
var previewLayer: AVCaptureVideoPreviewLayer?

override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(true)

    captureSession.sessionPreset = AVCaptureSessionPresetHigh

    let backCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
    do {
        let input = try AVCaptureDeviceInput(device: backCamera)
        if captureSession.canAddInput(input) {
            captureSession.addInput(input)
            if captureSession.canAddOutput(photoOutput) {
                captureSession.addOutput(photoOutput)
                captureSession.startRunning()

                previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
                previewLayer?.frame = cameraView.bounds

                cameraView.layer.addSublayer(previewLayer!)
            }
        }
    } catch {
        print(error.localizedDescription)
    }
}

func captureImage() {
    let settings = AVCapturePhotoSettings()
    let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
    let previewFormat = [kCVPixelBufferPixelFormatTypeKey as String: previewPixelType]
    settings.previewPhotoFormat = previewFormat
    photoOutput.capturePhoto(with: settings, delegate: self)
}

func capture(_ captureOutput: AVCapturePhotoOutput,
             didFinishProcessingPhotoSampleBuffer photoSampleBuffer: CMSampleBuffer?,
             previewPhotoSampleBuffer: CMSampleBuffer?,
             resolvedSettings: AVCaptureResolvedPhotoSettings,
             bracketSettings: AVCaptureBracketedStillImageSettings?,
             error: Error?) {
    if let error = error {
        print(error.localizedDescription)
    }
    // Not including previewPhotoSampleBuffer
    if let sampleBuffer = photoSampleBuffer,
       let dataImage = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer,
                                                                        previewPhotoSampleBuffer: nil) {
        self.imageView.image = UIImage(data: dataImage)
        self.imageView.isHidden = false
        self.previewLayer?.isHidden = true
        self.findFace(img: self.imageView.image!)
    }
}
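As an aside: on iOS 11 and later, the didFinishProcessingPhotoSampleBuffer callback above is itself deprecated in favour of photoOutput(_:didFinishProcessingPhoto:error:). A rough sketch of the equivalent, assuming the same imageView, previewLayer and findFace(img:) as above:

func photoOutput(_ output: AVCapturePhotoOutput,
                 didFinishProcessingPhoto photo: AVCapturePhoto,
                 error: Error?) {
    if let error = error {
        print(error.localizedDescription)
        return
    }
    // fileDataRepresentation() returns the captured photo as Data.
    if let data = photo.fileDataRepresentation(), let image = UIImage(data: data) {
        imageView.image = image
        imageView.isHidden = false
        previewLayer?.isHidden = true
        findFace(img: image)
    }
}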
findFace works with normal images. However, an image captured through the camera either does not work or sometimes recognizes only one face.
Normal image
Captured image
func findFace(img: UIImage) {
    guard let faceImage = CIImage(image: img) else { return }
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)

    // For converting the Core Image coordinates to UIView coordinates
    let detectedImageSize = faceImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -detectedImageSize.height)

    if let faces = faceDetector?.features(in: faceImage, options: [CIDetectorSmile: true, CIDetectorEyeBlink: true]) {
        for face in faces as! [CIFaceFeature] {
            // Apply the transform to convert the coordinates
            var faceViewBounds = face.bounds.applying(transform)

            // Calculate the actual position and size of the rectangle in the image view
            let viewSize = imageView.bounds.size
            let scale = min(viewSize.width / detectedImageSize.width,
                            viewSize.height / detectedImageSize.height)
            let offsetX = (viewSize.width - detectedImageSize.width * scale) / 2
            let offsetY = (viewSize.height - detectedImageSize.height * scale) / 2

            faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
            print("faceBounds = \(faceViewBounds)")
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY

            showBounds(at: faceViewBounds)
        }

        if faces.count != 0 {
            print("Number of faces: \(faces.count)")
        } else {
            print("No faces")
        }
    }
}

func showBounds(at bounds: CGRect) {
    let indicator = UIView(frame: bounds)
    indicator.frame = bounds
    indicator.layer.borderWidth = 3
    indicator.layer.borderColor = UIColor.red.cgColor
    indicator.backgroundColor = .clear

    self.imageView.addSubview(indicator)
    faceBoxes.append(indicator)
}
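One common reason a camera capture behaves worse than a library photo here is orientation: photos taken with the camera carry an EXIF orientation that CIDetector does not read from the UIImage automatically, so faces in rotated buffers can be missed. A sketch of passing the orientation explicitly (my assumption, reusing faceDetector, faceImage and img from findFace above):

// Hypothetical helper: map UIImageOrientation to the EXIF value CIDetector expects.
func exifOrientation(for orientation: UIImageOrientation) -> Int {
    switch orientation {
    case .up:            return 1
    case .upMirrored:    return 2
    case .down:          return 3
    case .downMirrored:  return 4
    case .leftMirrored:  return 5
    case .right:         return 6
    case .rightMirrored: return 7
    case .left:          return 8
    }
}

// Pass the orientation when asking the detector for features.
let options: [String: Any] = [CIDetectorSmile: true,
                              CIDetectorEyeBlink: true,
                              CIDetectorImageOrientation: exifOrientation(for: img.imageOrientation)]
let faces = faceDetector?.features(in: faceImage, options: options)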
Shr*_*apu 12
There are two ways to detect faces: one is CIFaceDetector and the other is AVCaptureMetadataOutput.
Depending on your requirements, choose whichever is relevant for you.
CIFaceDetector has more features: for example, it gives you the positions of the eyes and mouth, a smile detector, and so on.
AVCaptureMetadataOutput, on the other hand, is computed on the frames and tracks each detected face, and there is no extra code we need to add. I find that, because of the tracking, faces are detected more reliably with this approach. The downside is that you only get the face rectangles, with no eye or mouth positions. Another advantage of this method is that orientation issues are smaller: you can use videoOrientation whenever the device orientation changes, and the orientation of the faces will be relative to that orientation. An example of the extra CIFaceDetector information is sketched after this comparison.
In my case, my application uses YUV420 as the required format, so using CIDetector (which works with RGB) was not viable. Using AVCaptureMetadataOutput saved a lot of effort and performed more reliably thanks to the continuous tracking.
Once I had the bounding box for the faces, I coded extra features, such as skin detection, and applied them to the still image.
Note: when you capture a still image, the face box information is added along with the metadata, so there are no sync issues.
You can also use a combination of the two to get better results.
Explore and evaluate the pros and cons according to your application.
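To illustrate the extra per-face information mentioned above, a small sketch (assuming faces comes from features(in:options:) with CIDetectorSmile and CIDetectorEyeBlink enabled, as in the question's findFace):

for face in faces as! [CIFaceFeature] {
    // Each CIFaceFeature can report eye/mouth positions, smiles and blinks.
    if face.hasSmile { print("smiling") }
    if face.hasLeftEyePosition { print("left eye at \(face.leftEyePosition)") }
    if face.hasRightEyePosition { print("right eye at \(face.rightEyePosition)") }
    if face.hasMouthPosition { print("mouth at \(face.mouthPosition)") }
    if face.leftEyeClosed || face.rightEyeClosed { print("blinking") }
}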
UPDATE
The face rectangle is relative to the origin of the image, so it may differ from what you need for the screen. Use something like the following:
for (AVMetadataFaceObject *faceFeatures in metadataObjects) {
    CGRect face = faceFeatures.bounds;
    CGRect facePreviewBounds = CGRectMake(face.origin.y * previewLayerRect.size.width,
                                          face.origin.x * previewLayerRect.size.height,
                                          face.size.width * previewLayerRect.size.height,
                                          face.size.height * previewLayerRect.size.width);
    /* Draw rectangle facePreviewBounds on screen */
}
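The same conversion in Swift, for reference (previewLayerRect is assumed to be the preview layer's frame):

for case let faceObject as AVMetadataFaceObject in metadataObjects {
    let face = faceObject.bounds
    // Note the swapped axes: the metadata bounds are relative to the (rotated) image.
    let facePreviewBounds = CGRect(x: face.origin.y * previewLayerRect.size.width,
                                   y: face.origin.x * previewLayerRect.size.height,
                                   width: face.size.width * previewLayerRect.size.height,
                                   height: face.size.height * previewLayerRect.size.width)
    // Draw rectangle facePreviewBounds on screen
}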
To perform face detection on iOS, you can use either CIDetector (Apple) or Mobile Vision (Google).
IMO, Google Mobile Vision provides better performance.
If you are interested, here is a project you can play with. (iOS 10.2, Swift 3)
After WWDC 2017, Apple introduced Core ML in iOS 11. The Vision framework makes face detection more accurate :)
I made a demo project comparing Vision with CIDetector. It also includes real-time face landmark detection.
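For reference, a minimal sketch of the Vision route (iOS 11+); detectFaces here is a hypothetical helper, not code from the demo project:

import UIKit
import Vision

func detectFaces(in image: UIImage) {
    guard let cgImage = image.cgImage else { return }
    // VNFaceObservation.boundingBox is normalized with the origin at the lower-left
    // corner, so it still has to be converted before drawing over a UIView.
    let request = VNDetectFaceRectanglesRequest { request, _ in
        let faces = request.results as? [VNFaceObservation] ?? []
        print("Number of faces: \(faces.count)")
        for face in faces {
            print("normalized bounding box: \(face.boundingBox)")
        }
    }
    let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
    do {
        try handler.perform([request])
    } catch {
        print(error.localizedDescription)
    }
}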