Member 12837194 Ответов: 0

Есть ли какой-нибудь способ обнаружить лицо (распознавание лиц) в предварительно записанном видео?


Is there any way to detect faces in a pre-recorded video? I have tried live streaming with AVCaptureSession along with CIDetector and was able to detect faces for Core Image stills and live streaming. Is there any way to do face detection in an already recorded video, so that I am able to add an emoji on the face?


Что я уже пробовал:

//для распознавания лиц я использую код ниже

//for face detection in swift
    /// Detects faces in the image currently displayed by `facePicture` and
    /// overlays a red bounding box on each face plus a translucent blue
    /// circle on the left eye.
    ///
    /// CIDetector reports features in the CIImage coordinate space (origin
    /// at the bottom-left, in image pixels). Every overlay position is
    /// therefore flipped vertically and then scaled/offset to match the
    /// aspect-fit placement of the image inside the image view before any
    /// subview is added.
    ///
    /// - Parameter facePicture: The image view whose current image is
    ///   scanned; overlay views are added as its subviews.
    func addingPictureForDetection(facePicture: UIImageView)
    {
        // Avoid force-unwrapping: bail out quietly when there is no image.
        guard let sourceImage = facePicture.image else {
            return
        }
        guard let personciImage = CIImage(image: sourceImage) else {
            return
        }

        let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
        let faces = faceDetector.featuresInImage(personciImage)
        print(faces)

        // --- Loop-invariant coordinate mapping (hoisted out of the loop) ---

        // CIImage uses a bottom-left origin; UIKit uses top-left, so flip Y.
        let ciImageSize = personciImage.extent.size
        var transform = CGAffineTransformMakeScale(1, -1)
        transform = CGAffineTransformTranslate(transform, 0, -ciImageSize.height)

        // Aspect-fit scale and centering offsets of the image in the view.
        let viewSize = facePicture.bounds.size
        let scale = min(viewSize.width / ciImageSize.width,
                        viewSize.height / ciImageSize.height)
        let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
        let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

        // Maps a single point from CIImage space into the image view's space.
        func convertToViewSpace(point: CGPoint) -> CGPoint {
            let flipped = CGPointApplyAffineTransform(point, transform)
            return CGPoint(x: flipped.x * scale + offsetX,
                           y: flipped.y * scale + offsetY)
        }

        // `for case` replaces the forced `as!` cast: non-face features (if
        // any) are skipped instead of crashing.
        for case let face as CIFaceFeature in faces {

            NSLog("Face found at (%f,%f) of dimensions %fx%f", face.bounds.origin.x, face.bounds.origin.y, face.bounds.width, face.bounds.height);

            // Face rectangle: flip, then scale and center into the view.
            var faceViewBounds = CGRectApplyAffineTransform(face.bounds, transform)
            faceViewBounds = CGRectApplyAffineTransform(faceViewBounds, CGAffineTransformMakeScale(scale, scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY

            let faceBox = UIView(frame: faceViewBounds)
            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.redColor().CGColor
            faceBox.backgroundColor = UIColor.clearColor()
            facePicture.addSubview(faceBox)

            // Use the on-screen (already scaled) face width so the eye
            // marker is sized relative to what the user actually sees,
            // not the raw image pixels.
            let faceWidth = faceViewBounds.size.width

            if face.hasLeftEyePosition {
                // BUG FIX: the marker was previously centered on the raw
                // CIImage-space eye position and added to self.view, so it
                // appeared in the wrong place. Convert the position into
                // the image view's space and attach it to the same view
                // that holds the face box.
                let eyeRadius = faceWidth * 0.15
                let leftEyeView = UIView(frame: CGRect(x: 0, y: 0,
                                                       width: eyeRadius * 2,
                                                       height: eyeRadius * 2))
                leftEyeView.backgroundColor = UIColor.blueColor().colorWithAlphaComponent(0.3)
                leftEyeView.center = convertToViewSpace(face.leftEyePosition)
                leftEyeView.layer.cornerRadius = eyeRadius
                facePicture.addSubview(leftEyeView)
            }
            if face.hasRightEyePosition {
                print("Right eye bounds are \(face.rightEyePosition)")
            }

            if face.hasMouthPosition {
                print("Mouth position are \(face.mouthPosition)")
            }

        }
    }

0 Ответов