Rendering Video with Swift and Metal (Part 2)
3. Getting video frames and passing them to Metal as render textures
1. Create the AVMutableComposition and add the video track

    let composition = AVMutableComposition()
    self.composition = composition

Add the video track:

    func addVideoTrack(to compostion: AVMutableComposition, preferedTrackID: CMPersistentTrackID) {
        guard let source = renderLayer.source else {
            return
        }
        guard let assetTrack = source.tracks(for: .video).first else {
            return
        }
        trackId = preferedTrackID
        preferredTransform = assetTrack.preferredTransform

        // Reuse an existing composition track with this ID, or add a new one
        let compositionTrack: AVMutableCompositionTrack? = {
            if let com = compostion.track(withTrackID: preferedTrackID) {
                return com
            } else {
                return compostion.addMutableTrack(withMediaType: .video, preferredTrackID: preferedTrackID)
            }
        }()

        if let compositionTrack = compositionTrack {
            do {
                try compositionTrack.insertTimeRange(source.selectedTimeRange, of: assetTrack, at: timeRangeInTimeline.start)
            } catch {
                print("add video track failed!")
            }
        }
    }
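For context, here is a minimal sketch of how the composition might be assembled by calling addVideoTrack for every render layer. The videoRenderLayers array and the incrementing preferred track ID are assumptions based on how makeComposition() is used later in this post:

    private func makeComposition() -> AVMutableComposition {
        let composition = AVMutableComposition()
        self.composition = composition

        // Assumption: each VideoRenderLayer adds its own track with a unique preferred track ID.
        var preferredTrackID: CMPersistentTrackID = 1
        videoRenderLayers.forEach { videoRenderLayer in
            videoRenderLayer.addVideoTrack(to: composition, preferedTrackID: preferredTrackID)
            preferredTrackID += 1
        }
        return composition
    }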
2. Add a class conforming to AVVideoCompositionInstructionProtocol, in preparation for creating the AVMutableVideoComposition
    class VideoCompositionInstruction: NSObject, AVVideoCompositionInstructionProtocol {
        var timeRange: CMTimeRange
        var enablePostProcessing: Bool
        var containsTweening: Bool
        var requiredSourceTrackIDs: [NSValue]?
        var passthroughTrackID: CMPersistentTrackID

        var videoRenderLayers: [VideoRenderLayer] = []

        init(videoRenderLayers: [VideoRenderLayer], timeRange: CMTimeRange) {
            self.timeRange = timeRange
            enablePostProcessing = true
            containsTweening = true
            passthroughTrackID = kCMPersistentTrackID_Invalid
            super.init()

            self.videoRenderLayers = videoRenderLayers

            // Collect the track IDs of all layers this instruction needs to read from
            var trackIDSet: Set<CMPersistentTrackID> = []
            videoRenderLayers.forEach { videoRenderLayer in
                if let videoRenderLayerGroup = videoRenderLayer as? VideoRenderLayerGroup {
                    let recursiveTrackIDs = videoRenderLayerGroup.recursiveTrackIDs()
                    trackIDSet = trackIDSet.union(Set(recursiveTrackIDs))
                } else {
                    trackIDSet.insert(videoRenderLayer.trackId)
                }
            }
            requiredSourceTrackIDs = Array(trackIDSet)
                .filter { $0 != kCMPersistentTrackID_Invalid }
                .compactMap { $0 as NSValue }
        }
    }
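The recursiveTrackIDs() helper on VideoRenderLayerGroup is not shown in this post. A plausible sketch, assuming a group keeps its child layers in a videoRenderLayers array:

    // Sketch only: collect the track IDs of all nested layers, descending into sub-groups.
    func recursiveTrackIDs() -> [CMPersistentTrackID] {
        return videoRenderLayers.flatMap { layer -> [CMPersistentTrackID] in
            if let group = layer as? VideoRenderLayerGroup {
                return group.recursiveTrackIDs()
            }
            return [layer.trackId]
        }
    }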
3. Add a class conforming to AVVideoCompositing, also in preparation for creating the AVMutableVideoComposition
    class VideoCompositor: NSObject, AVVideoCompositing {
        private var renderingQueue = DispatchQueue(label: "com.studio.VideoEditor.renderingqueue")
        private var renderContextQueue = DispatchQueue(label: "com.studio.VideoEditor.rendercontextqueue")
        private var renderContext: AVVideoCompositionRenderContext?
        private var shouldCancelAllRequests = false
        private let layerCompositor = LayerCompositor()

        var sourcePixelBufferAttributes: [String : Any]? = [
            String(kCVPixelBufferPixelFormatTypeKey): [Int(kCVPixelFormatType_32ABGR),
                                                       Int(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange),
                                                       Int(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)],
            String(kCVPixelBufferOpenGLCompatibilityKey): true
        ]

        var requiredPixelBufferAttributesForRenderContext: [String : Any] = [
            String(kCVPixelBufferPixelFormatTypeKey): Int(kCVPixelFormatType_32BGRA),
            String(kCVPixelBufferOpenGLESCompatibilityKey): true
        ]

        func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {
            renderingQueue.sync {
                renderContext = newRenderContext
            }
        }

        enum PixelBufferRequestError: Error {
            case newRenderedPixelBufferForRequestFailure
        }

        func startRequest(_ asyncVideoCompositionRequest: AVAsynchronousVideoCompositionRequest) {
            autoreleasepool {
                renderingQueue.async {
                    if self.shouldCancelAllRequests {
                        asyncVideoCompositionRequest.finishCancelledRequest()
                    } else {
                        guard let resultPixels = self.newRenderedPixelBufferForRequest(asyncVideoCompositionRequest) else {
                            asyncVideoCompositionRequest.finish(with: PixelBufferRequestError.newRenderedPixelBufferForRequestFailure)
                            return
                        }
                        asyncVideoCompositionRequest.finish(withComposedVideoFrame: resultPixels)
                    }
                }
            }
        }

        func cancelAllPendingVideoCompositionRequests() {
            renderingQueue.sync {
                shouldCancelAllRequests = true
            }
            // Reset the flag once the pending requests have been drained
            renderingQueue.async {
                self.shouldCancelAllRequests = false
            }
        }

        func newRenderedPixelBufferForRequest(_ request: AVAsynchronousVideoCompositionRequest) -> CVPixelBuffer? {
            guard let newPixelBuffer = renderContext?.newPixelBuffer() else {
                return nil
            }
            layerCompositor.renderPixelBuffer(newPixelBuffer, for: request)
            return newPixelBuffer
        }
    }
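LayerCompositor.renderPixelBuffer(_:for:) is where the actual drawing happens. Roughly, it reads each decoded source frame from the request by track ID and composites it into the destination buffer. The sketch below is an assumption about its shape; the instruction cast and the loop body are not the project's actual implementation:

    class LayerCompositor {
        func renderPixelBuffer(_ pixelBuffer: CVPixelBuffer, for request: AVAsynchronousVideoCompositionRequest) {
            // The instruction carries the layers active for this time range.
            guard let instruction = request.videoCompositionInstruction as? VideoCompositionInstruction else {
                return
            }
            for videoRenderLayer in instruction.videoRenderLayers {
                // Fetch the decoded source frame for this layer's track, if any.
                guard let sourceBuffer = request.sourceFrame(byTrackID: videoRenderLayer.trackId) else {
                    continue
                }
                // Convert sourceBuffer and pixelBuffer to Metal textures and draw
                // (see the makeTexture helper and renderTexture below).
                _ = sourceBuffer
            }
        }
    }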
4. Create the AVMutableVideoComposition
    private func makeVideoComposition() -> AVMutableVideoComposition {
        // TODO: optimize make performance, like return when exist
        // Convert videoRenderLayers to videoCompositionInstructions

        // Step 1: Put the layer start time and end time on the timeline, each interval is an instruction. Then sort by time
        // Make sure times contain zero
        var times: [CMTime] = [CMTime.zero]
        videoRenderLayers.forEach { videoRenderLayer in
            let startTime = videoRenderLayer.timeRangeInTimeline.start
            let endTime = videoRenderLayer.timeRangeInTimeline.end
            if !times.contains(startTime) {
                times.append(startTime)
            }
            if !times.contains(endTime) {
                times.append(endTime)
            }
        }
        times.sort { $0 < $1 }

        // Step 2: Create instructions for each interval
        var instructions: [VideoCompositionInstruction] = []
        for index in 0..<times.count - 1 {
            let startTime = times[index]
            let endTime = times[index + 1]
            let timeRange = CMTimeRange(start: startTime, end: endTime)
            var intersectingVideoRenderLayers: [VideoRenderLayer] = []
            videoRenderLayers.forEach { videoRenderLayer in
                if !videoRenderLayer.timeRangeInTimeline.intersection(timeRange).isEmpty {
                    intersectingVideoRenderLayers.append(videoRenderLayer)
                }
            }
            intersectingVideoRenderLayers.sort(by: { $0.renderLayer.layerLevel < $1.renderLayer.layerLevel })
            let instruction = VideoCompositionInstruction(videoRenderLayers: intersectingVideoRenderLayers, timeRange: timeRange)
            instructions.append(instruction)
        }

        // Create videoComposition. Specify frameDuration, renderSize, instructions, and customVideoCompositorClass.
        let videoComposition = AVMutableVideoComposition()
        videoComposition.frameDuration = renderComposition.frameDuration
        videoComposition.renderSize = renderComposition.renderSize
        videoComposition.instructions = instructions
        videoComposition.customVideoCompositorClass = VideoCompositor.self
        self.videoComposition = videoComposition
        return videoComposition
    }
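For example, if layer A covers 0-10 s on the timeline and layer B covers 5-15 s, the collected boundary times are [0, 5, 10, 15], which yields three instructions: 0-5 (A only), 5-10 (A and B, sorted by layerLevel), and 10-15 (B only).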
5. Bind everything to the corresponding AVPlayerItem
    func makePlayerItem() -> AVPlayerItem {
        let composition = makeComposition()
        let playerItem = AVPlayerItem(asset: composition)
        playerItem.videoComposition = makeVideoComposition()
        // playerItem.audioMix = makeAudioMix()
        return playerItem
    }
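For completeness, a minimal usage sketch of driving playback with the resulting item. The AVPlayer / AVPlayerLayer wiring below is an assumption and is not part of the original code:

    // Hypothetical wiring: play the composed item with a standard AVPlayer.
    let playerItem = makePlayerItem()
    let player = AVPlayer(playerItem: playerItem)
    let playerLayer = AVPlayerLayer(player: player)
    playerLayer.frame = view.bounds
    view.layer.addSublayer(playerLayer)
    player.play()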
This way, when the player plays, playback goes through the AVVideoCompositing protocol method:
    func startRequest(_ asyncVideoCompositionRequest: AVAsynchronousVideoCompositionRequest)
In this method you can obtain the corresponding pixel buffer, convert it into an MTLTexture, and hand it to Metal for rendering.
Converting the buffer to a texture:
    public class func makeTexture(pixelBuffer: CVPixelBuffer,
                                  pixelFormat: MTLPixelFormat = .bgra8Unorm,
                                  width: Int? = nil,
                                  height: Int? = nil,
                                  plane: Int = 0) -> Texture? {
        guard let iosurface = CVPixelBufferGetIOSurface(pixelBuffer)?.takeUnretainedValue() else {
            return nil
        }

        let textureWidth: Int, textureHeight: Int
        if let width = width, let height = height {
            textureWidth = width
            textureHeight = height
        } else {
            textureWidth = CVPixelBufferGetWidth(pixelBuffer)
            textureHeight = CVPixelBufferGetHeight(pixelBuffer)
        }

        let descriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: pixelFormat,
                                                                  width: textureWidth,
                                                                  height: textureHeight,
                                                                  mipmapped: false)
        descriptor.usage = [.renderTarget, .shaderRead, .shaderWrite]

        guard let metalTexture = sharedMetalRenderingDevice.device.makeTexture(descriptor: descriptor,
                                                                               iosurface: iosurface,
                                                                               plane: plane) else {
            return nil
        }

        let texture = Texture(texture: metalTexture)
        return texture
    }
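Note that CVPixelBufferGetIOSurface only returns a surface when the buffer is IOSurface-backed. Buffers vended by AVVideoCompositionRenderContext.newPixelBuffer() generally are, but if you allocate intermediate buffers yourself you need to request IOSurface backing explicitly. A hedged sketch (the 1920x1080 size is just an example):

    // Assumption: creating an IOSurface-backed, Metal-compatible CVPixelBuffer manually.
    var pixelBuffer: CVPixelBuffer?
    let attributes: [String: Any] = [
        String(kCVPixelBufferIOSurfacePropertiesKey): [String: Any](),  // request IOSurface backing
        String(kCVPixelBufferMetalCompatibilityKey): true
    ]
    let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                     1920, 1080,
                                     kCVPixelFormatType_32BGRA,
                                     attributes as CFDictionary,
                                     &pixelBuffer)
    assert(status == kCVReturnSuccess, "pixel buffer creation failed")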
4. The real-time rendering stage
With all the preparation above in place, the actual rendering stage begins: as each frame is rendered in turn, the dazzling final picture appears.
    public func renderTexture(_ outputTexture: Texture) {
        let _ = textureInputSemaphore.wait(timeout: DispatchTime.distantFuture)
        defer {
            textureInputSemaphore.signal()
        }

        if inputTextures.count >= maximumInputs {
            guard let commandBuffer = sharedMetalRenderingDevice.commandQueue.makeCommandBuffer() else {
                return
            }
            commandBuffer.renderQuad(pipelineState: renderPipelineState,
                                     uniformSetting: uniformSettings,
                                     inputTexture: inputTextures,
                                     outputTeture: outputTexture,
                                     enableOutputTextureRead: enableOutputTextureRead)
            commandBuffer.commit()
        }
    }

    func renderQuad(pipelineState: MTLRenderPipelineState,
                    uniformSetting: ShaderUniformSettings? = nil,
                    inputTexture: [UInt: Texture],
                    imageVertices: [Float] = standardImageVertices,
                    textureCoodinates: [Float] = standardTextureCoordinates,
                    outputTeture: Texture,
                    enableOutputTextureRead: Bool) {
        // Render pass descriptor
        let renderPass = MTLRenderPassDescriptor()
        renderPass.colorAttachments[0].texture = outputTeture.texture
        renderPass.colorAttachments[0].clearColor = Color.mtlClearColor
        renderPass.colorAttachments[0].storeAction = .store
        renderPass.colorAttachments[0].loadAction = enableOutputTextureRead ? .load : .clear

        // Create the render command encoder
        guard let renderEncoder = self.makeRenderCommandEncoder(descriptor: renderPass) else {
            fatalError("could not create render encoder")
        }
        // Set the front-facing winding order
        renderEncoder.setFrontFacing(.counterClockwise)
        // Set the render pipeline state
        renderEncoder.setRenderPipelineState(pipelineState)
        // Set the vertex positions (standard quad vertices here; the concrete per-filter vertex and fragment values are applied in restorShaderSettings)
        renderEncoder.setVertexBytes(imageVertices, length: imageVertices.count * MemoryLayout<Float>.size, index: 0)
        // Bind texture coordinates and input textures
        for textureIndex in 0..<inputTexture.count {
            renderEncoder.setVertexBytes(textureCoodinates, length: textureCoodinates.count * MemoryLayout<Float>.size, index: 1 + textureIndex)
            renderEncoder.setFragmentTexture(inputTexture[UInt(textureIndex)]!.texture, index: textureIndex)
        }
        // Apply the concrete filter parameters
        uniformSetting?.restorShaderSettings(renderEncoder: renderEncoder)
        renderEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4) // four vertices for a 2D quad
        renderEncoder.endEncoding()
    }
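The shaders behind renderPipelineState and the standardImageVertices / standardTextureCoordinates constants are not shown above. Below is a minimal passthrough sketch of what they might look like; the constant values, function names, and the source-string compilation are assumptions, not the project's actual setup:

    // Hedged sketch: a full-screen quad (triangle strip) and a passthrough pipeline.
    let standardImageVertices: [Float] = [-1.0, -1.0,  1.0, -1.0,  -1.0, 1.0,  1.0, 1.0]
    let standardTextureCoordinates: [Float] = [0.0, 1.0,  1.0, 1.0,  0.0, 0.0,  1.0, 0.0]

    // Passthrough shaders compiled from source (these would normally live in a .metal file).
    let shaderSource = """
    #include <metal_stdlib>
    using namespace metal;

    struct VertexOut {
        float4 position [[position]];
        float2 texCoord;
    };

    vertex VertexOut passthroughVertex(const device float *position [[buffer(0)]],
                                       const device float *texCoord [[buffer(1)]],
                                       uint vid [[vertex_id]]) {
        VertexOut out;
        out.position = float4(position[vid * 2], position[vid * 2 + 1], 0.0, 1.0);
        out.texCoord = float2(texCoord[vid * 2], texCoord[vid * 2 + 1]);
        return out;
    }

    fragment half4 passthroughFragment(VertexOut in [[stage_in]],
                                       texture2d<half> inputTexture [[texture(0)]]) {
        constexpr sampler s(filter::linear);
        return inputTexture.sample(s, in.texCoord);
    }
    """

    func makePassthroughPipelineState(device: MTLDevice) throws -> MTLRenderPipelineState {
        let library = try device.makeLibrary(source: shaderSource, options: nil)
        let descriptor = MTLRenderPipelineDescriptor()
        descriptor.vertexFunction = library.makeFunction(name: "passthroughVertex")
        descriptor.fragmentFunction = library.makeFunction(name: "passthroughFragment")
        descriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
        return try device.makeRenderPipelineState(descriptor: descriptor)
    }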