Capturing a live video stream from the camera with AVFoundation on iOS


Capturing a live video stream from the camera on an iOS device with the AVFoundation framework involves the following steps:

1. Import the AVFoundation framework

2. Set up an AVCaptureSession and add the input and output devices

3. Set up an AVCaptureVideoPreviewLayer and add it to the view's layer

4. Start the AVCaptureSession

5. Implement the AVCaptureVideoDataOutputSampleBufferDelegate method that processes the video stream

6. Stop the AVCaptureSession
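
Note that capture only works once the user has granted camera access: the app's Info.plist needs an NSCameraUsageDescription entry, and authorization can be requested up front. A minimal sketch of requesting access before configuring the session (where exactly you call this is up to you):
// Requires an NSCameraUsageDescription entry in Info.plist.
AVCaptureDevice.requestAccess(for: .video) { granted in
    if granted {
        // Safe to configure and start the capture session.
    } else {
        // Access denied; explain why the camera is needed or disable capture.
    }
}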

The detailed steps and sample code are as follows:

1. Import the AVFoundation framework
import AVFoundation
2. Set up the AVCaptureSession and add the input and output devices
let captureSession = AVCaptureSession()

// Set up the camera input
guard let captureDevice = AVCaptureDevice.default(for: .video),
    let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
    fatalError("Could not create AVCaptureDeviceInput")
}

captureSession.addInput(captureDeviceInput)

// Set up the video data output
let captureOutput = AVCaptureVideoDataOutput()

let settings: [String: Any] = [
    kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA),
]
captureOutput.videoSettings = settings
captureOutput.alwaysDiscardsLateVideoFrames = true

// Set the sample buffer delegate; use a serial queue so frames are delivered in order
captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoOutputQueue"))
captureSession.addOutput(captureOutput)

let captureConnection = captureOutput.connection(with: .video)
captureConnection?.videoOrientation = .portrait
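
In practice it is safer to confirm that the session can actually accept the input and output before adding them, and to bracket configuration changes with beginConfiguration()/commitConfiguration(). A sketch of that pattern:
captureSession.beginConfiguration()

// Only add the input/output if the session accepts them.
if captureSession.canAddInput(captureDeviceInput) {
    captureSession.addInput(captureDeviceInput)
}
if captureSession.canAddOutput(captureOutput) {
    captureSession.addOutput(captureOutput)
}

captureSession.commitConfiguration()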
3. Set up the AVCaptureVideoPreviewLayer and add it to the view's layer
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.frame = view.bounds

// Insert the preview layer into the view's layer hierarchy
view.layer.insertSublayer(previewLayer, at: 0)
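
Because previewLayer.frame is assigned only once here, it will not follow later layout changes such as rotation. One way to keep it in sync, assuming the layer is stored in a previewLayer property on the view controller, is:
// Keep the preview layer sized to the view whenever layout changes.
override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    previewLayer.frame = view.bounds
}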
4. Start the AVCaptureSession
captureSession.startRunning()
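
startRunning() is a blocking call, so Apple recommends starting the session off the main thread to keep the UI responsive. A minimal sketch:
// startRunning() blocks the calling thread until capture actually starts.
DispatchQueue.global(qos: .userInitiated).async {
    captureSession.startRunning()
}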
5. Implement the AVCaptureVideoDataOutputSampleBufferDelegate method that processes the video stream
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Process the video frame
    }
}
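
Inside this callback the raw pixels are available as a CVPixelBuffer. A minimal sketch of pulling it out of the sample buffer and wrapping it in a CIImage for further processing (add import CoreImage if it is not already pulled in via UIKit):
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // The frame's image data lives in the sample buffer's pixel buffer.
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    // Wrap it for Core Image / Vision processing; note this runs on the delegate queue, not the main thread.
    let image = CIImage(cvPixelBuffer: pixelBuffer)
    _ = image
}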
6. Stop the AVCaptureSession
captureSession.stopRunning()
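
A natural place to stop the session is when the view goes off screen, mirroring the start in viewDidLoad. A sketch, assuming captureSession is a property of the view controller:
// Stop capturing when the view disappears to release the camera.
override func viewWillDisappear(_ animated: Bool) {
    super.viewWillDisappear(animated)
    if captureSession.isRunning {
        captureSession.stopRunning()
    }
}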

The complete sample code:

import UIKit
import AVFoundation

class ViewController: UIViewController {
    
    let captureSession = AVCaptureSession()
    
    override func viewDidLoad() {
        super.viewDidLoad()
        view.backgroundColor = .white
        
        // Set up the camera input
        guard let captureDevice = AVCaptureDevice.default(for: .video),
            let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
            fatalError("Could not create AVCaptureDeviceInput")
        }
        
        captureSession.addInput(captureDeviceInput)
        
        // Set up the video data output
        let captureOutput = AVCaptureVideoDataOutput()
        
        let settings: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA),
        ]
        captureOutput.videoSettings = settings
        captureOutput.alwaysDiscardsLateVideoFrames = true
        
        // Set the sample buffer delegate; use a serial queue so frames are delivered in order
        captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoOutputQueue"))
        captureSession.addOutput(captureOutput)
        
        let captureConnection = captureOutput.connection(with: .video)
        captureConnection?.videoOrientation = .portrait
        
        // Set up the preview layer
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.videoGravity = .resizeAspectFill
        previewLayer.frame = view.bounds
        
        // Insert the preview layer into the view's layer hierarchy
        view.layer.insertSublayer(previewLayer, at: 0)
        
        // Start the AVCaptureSession; startRunning() blocks, so call it off the main thread
        DispatchQueue.global(qos: .userInitiated).async { [weak self] in
            self?.captureSession.startRunning()
        }
    }
    
}

extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Process the video frame
    }
}
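
The example relies on AVCaptureDevice.default(for: .video), which returns the system's default video device. If a specific camera is wanted, the typed variant can be used instead; a sketch preferring the back wide-angle camera:
// Prefer the back wide-angle camera and fall back to any available video device.
let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
    ?? AVCaptureDevice.default(for: .video)
Also remember that the camera is not available in the Simulator, so the sample needs to run on a physical device.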