Swift Metal渲染视频(二)
三、获取视频帧画面,传给Metal渲染纹理
1. 创建 AVMutableComposition 并添加视频轨道
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 | 创建 AVMutableComposition let composition = AVMutableComposition () self . composition = composition 添加视频轨道 func addVideoTrack ( to compostion : AVMutableComposition , preferedTrackID : CMPersistentTrackID ){ guard let source = renderLayer . source else { return } guard let assetTrack = source . tracks ( for : . video ). first else { return } trackId = preferedTrackID preferredTransform = assetTrack . preferredTransform let compositionTrack : AVMutableCompositionTrack ? = { if let com = compostion . track ( withTrackID : preferedTrackID ){ return com } else { return compostion . addMutableTrack ( withMediaType : . video , preferredTrackID : preferedTrackID ) } }() if let compositionTrack = compositionTrack { do { print ( "------------------------------------" ) try compositionTrack . insertTimeRange ( source . selectedTimeRange , of : assetTrack , at : timeRangeInTimeline . start ) } catch { print ( " add video track failed!" ) } } } |
2.添加实现AVVideoCompositionInstructionProtocol协议的类,为创建AVMutableVideoComposition作准备
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 | class VideoCompositionInstruction : NSObject , AVVideoCompositionInstructionProtocol { var timeRange : CMTimeRange var enablePostProcessing : Bool var containsTweening : Bool var requiredSourceTrackIDs : [ NSValue ]? var passthroughTrackID : CMPersistentTrackID var videoRenderLayers : [ VideoRenderLayer ] = [] init ( videoRenderLayers : [ VideoRenderLayer ], timeRange : CMTimeRange ) { self . timeRange = timeRange enablePostProcessing = true containsTweening = true passthroughTrackID = kCMPersistentTrackID_Invalid super . init () self . videoRenderLayers = videoRenderLayers var trackIDSet : Set < CMPersistentTrackID > = [] videoRenderLayers . forEach { videoRenderLayer in if let videoRenderLayerGroup = videoRenderLayer as ? VideoRenderLayerGroup { let recursiveTrackIDs = videoRenderLayerGroup . recursiveTrackIDs () trackIDSet = trackIDSet . union ( Set ( recursiveTrackIDs )) } else { trackIDSet . insert ( videoRenderLayer . trackId ) } } requiredSourceTrackIDs = Array ( trackIDSet ) . filter { $ 0 != kCMPersistentTrackID_Invalid } . compactMap { $ 0 as NSValue } } } |
3.添加实现AVVideoCompositing协议的类,为创建AVMutableVideoComposition作准备
/// Custom AVVideoCompositing implementation: receives per-frame composition
/// requests from AVFoundation and renders them through `LayerCompositor`.
/// Fix: the original declaration read `lass` (missing the `c` of `class`).
class VideoCompositor: NSObject, AVVideoCompositing {
    private var renderingQueue = DispatchQueue(label: "com.studio.VideoEditor.renderingqueue")
    private var renderContextQueue = DispatchQueue(label: "com.studio.VideoEditor.rendercontextqueue")
    private var renderContext: AVVideoCompositionRenderContext?
    private var shouldCancelAllRequests = false
    private let layerCompositor = LayerCompositor()

    // NOTE(review): kCVPixelFormatType_32ABGR next to the 420v/420f formats looks
    // unusual for a BGRA Metal pipeline — confirm 32BGRA was not intended here.
    var sourcePixelBufferAttributes: [String: Any]? = [
        String(kCVPixelBufferPixelFormatTypeKey): [Int(kCVPixelFormatType_32ABGR),
                                                   Int(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange),
                                                   Int(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)],
        String(kCVPixelBufferOpenGLCompatibilityKey): true
    ]

    var requiredPixelBufferAttributesForRenderContext: [String: Any] = [
        String(kCVPixelBufferPixelFormatTypeKey): Int(kCVPixelFormatType_32BGRA),
        String(kCVPixelBufferOpenGLESCompatibilityKey): true
    ]

    /// Synchronizes the new render context onto the rendering queue so that
    /// in-flight frame requests never observe a torn context.
    func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {
        renderingQueue.sync {
            renderContext = newRenderContext
        }
    }

    enum PixelBufferRequestError: Error {
        case newRenderedPixelBufferForRequestFailure
    }

    /// Entry point called by AVFoundation for every frame to compose.
    func startRequest(_ asyncVideoCompositionRequest: AVAsynchronousVideoCompositionRequest) {
        autoreleasepool {
            renderingQueue.async {
                if self.shouldCancelAllRequests {
                    asyncVideoCompositionRequest.finishCancelledRequest()
                } else {
                    guard let resultPixels = self.newRenderedPixelBufferForRequest(asyncVideoCompositionRequest) else {
                        asyncVideoCompositionRequest.finish(with: PixelBufferRequestError.newRenderedPixelBufferForRequestFailure)
                        return
                    }
                    asyncVideoCompositionRequest.finish(withComposedVideoFrame: resultPixels)
                }
            }
        }
    }

    func cancelAllPendingVideoCompositionRequests() {
        // Raise the flag synchronously so queued requests see it...
        renderingQueue.sync {
            shouldCancelAllRequests = true
        }
        // ...then reset it after all currently queued work has drained.
        renderingQueue.async {
            self.shouldCancelAllRequests = false
        }
    }

    /// Renders one frame: allocates an output buffer from the render context
    /// and lets `layerCompositor` draw the request's layers into it.
    func newRenderedPixelBufferForRequest(_ request: AVAsynchronousVideoCompositionRequest) -> CVPixelBuffer? {
        guard let newPixelBuffer = renderContext?.newPixelBuffer() else { return nil }
        layerCompositor.renderPixelBuffer(newPixelBuffer, for: request)
        return newPixelBuffer
    }
}
4.创建AVMutableVideoComposition
/// Builds an AVMutableVideoComposition whose instructions mirror the timeline:
/// every layer start/end becomes an interval boundary, and each interval gets
/// one instruction listing the layers that overlap it.
private func makeVideoComposition() -> AVMutableVideoComposition {
    // TODO: optimize make performance, like return when exist

    // Step 1: collect every distinct boundary time (always including zero),
    // then order the boundaries chronologically.
    var times: [CMTime] = [.zero]
    for videoRenderLayer in videoRenderLayers {
        let range = videoRenderLayer.timeRangeInTimeline
        for boundary in [range.start, range.end] where !times.contains(boundary) {
            times.append(boundary)
        }
    }
    times.sort()

    // Step 2: each adjacent boundary pair is one instruction; its layers are
    // those intersecting the interval, ordered bottom-to-top by layer level.
    var instructions: [VideoCompositionInstruction] = []
    for index in 0..<(times.count - 1) {
        let timeRange = CMTimeRange(start: times[index], end: times[index + 1])
        var intersectingVideoRenderLayers = videoRenderLayers.filter {
            !$0.timeRangeInTimeline.intersection(timeRange).isEmpty
        }
        intersectingVideoRenderLayers.sort {
            $0.renderLayer.layerLevel < $1.renderLayer.layerLevel
        }
        instructions.append(VideoCompositionInstruction(videoRenderLayers: intersectingVideoRenderLayers,
                                                        timeRange: timeRange))
    }

    // Assemble the composition: frame duration, render size, instructions,
    // and the custom compositor class that performs the Metal rendering.
    let videoComposition = AVMutableVideoComposition()
    videoComposition.frameDuration = renderComposition.frameDuration
    videoComposition.renderSize = renderComposition.renderSize
    videoComposition.instructions = instructions
    videoComposition.customVideoCompositorClass = VideoCompositor.self
    self.videoComposition = videoComposition
    return videoComposition
}
5.绑定相应的playerItem
1 2 3 4 5 6 7 8 | func makePlayerItem () - > AVPlayerItem { let composition = makeComposition () let playerItem = AVPlayerItem ( asset : composition ) playerItem . videoComposition = makeVideoComposition () // playerItem.audioMix = makeAudioMix() return playerItem } |
这样当player播放时就走AVVideoCompositing协议的方法:
func startRequest(_ asyncVideoCompositionRequest: AVAsynchronousVideoCompositionRequest)
在此方法中可以获取到对应的buffer,然后转化成MTLTexture,提供给Metal去渲染
buffer转为texture:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 | public class func makeTexture ( pixelBuffer : CVPixelBuffer , pixelFormat : MTLPixelFormat = . bgra8Unorm , width : Int ? = nil , height : Int ? = nil , plane : Int = 0 ) - > Texture ? { guard let iosurface = CVPixelBufferGetIOSurface ( pixelBuffer )?. takeUnretainedValue () else { return nil } let textureWidth : Int , textureHeight : Int if let width = width , let height = height { textureWidth = width textureHeight = height } else { textureWidth = CVPixelBufferGetWidth ( pixelBuffer ) textureHeight = CVPixelBufferGetHeight ( pixelBuffer ) } let descriptor = MTLTextureDescriptor . texture2DDescriptor ( pixelFormat : pixelFormat , width : textureWidth , height : textureHeight , mipmapped : false ) descriptor . usage = [. renderTarget , . shaderRead , . shaderWrite ] guard let metalTexture = sharedMetalRenderingDevice . device . makeTexture ( descriptor : descriptor , iosurface : iosurface , plane : plane ) else { return nil } let texture = Texture ( texture : metalTexture ) return texture } |
四、实时进行的渲染阶段
前边的准备工作都做好之后就是真正的渲染阶段,每一帧每一帧的渲染出来就可以看见绚丽的画面了
/// Commits a draw of the accumulated input textures into `outputTexture`,
/// gated by a semaphore so texture input and rendering never interleave.
public func renderTexture(_ outputTexture: Texture) {
    let _ = textureInputSemaphore.wait(timeout: DispatchTime.distantFuture)
    defer {
        textureInputSemaphore.signal()
    }

    // Draw only once every expected input texture has arrived.
    guard inputTextures.count >= maximumInputs,
          let commandBuffer = sharedMetalRenderingDevice.commandQueue.makeCommandBuffer() else {
        return
    }

    commandBuffer.renderQuad(pipelineState: renderPipelineState,
                             uniformSetting: uniformSettings,
                             inputTexture: inputTextures,
                             outputTeture: outputTexture,
                             enableOutputTextureRead: enableOutputTextureRead)
    commandBuffer.commit()
}

/// Encodes a full-screen quad draw of `inputTexture` into `outputTeture`
/// (labels kept as spelled for existing call sites).
func renderQuad(pipelineState: MTLRenderPipelineState,
                uniformSetting: ShaderUniformSettings? = nil,
                inputTexture: [UInt: Texture],
                imageVertices: [Float] = standardImageVertices,
                textureCoodinates: [Float] = standardTextureCoordinates,
                outputTeture: Texture,
                enableOutputTextureRead: Bool) {
    // Render-pass descriptor targeting the output texture; when reading is
    // enabled the existing contents are loaded instead of cleared.
    let renderPass = MTLRenderPassDescriptor()
    renderPass.colorAttachments[0].texture = outputTeture.texture
    renderPass.colorAttachments[0].clearColor = Color.mtlClearColor
    renderPass.colorAttachments[0].storeAction = .store
    renderPass.colorAttachments[0].loadAction = enableOutputTextureRead ? .load : .clear

    guard let renderEncoder = self.makeRenderCommandEncoder(descriptor: renderPass) else {
        fatalError("could not create render encoder")
    }

    renderEncoder.setFrontFacing(.counterClockwise)
    renderEncoder.setRenderPipelineState(pipelineState)

    // Vertex buffer 0 holds the quad positions; buffers 1+ hold one set of
    // texture coordinates per input texture.
    renderEncoder.setVertexBytes(imageVertices,
                                 length: imageVertices.count * MemoryLayout<Float>.size,
                                 index: 0)
    for textureIndex in 0..<inputTexture.count {
        renderEncoder.setVertexBytes(textureCoodinates,
                                     length: textureCoodinates.count * MemoryLayout<Float>.size,
                                     index: 1 + textureIndex)
        renderEncoder.setFragmentTexture(inputTexture[UInt(textureIndex)]!.texture,
                                         index: textureIndex)
    }

    // Apply filter-specific uniforms, then draw the two-triangle strip
    // covering the four quad vertices.
    uniformSetting?.restorShaderSettings(renderEncoder: renderEncoder)
    renderEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4)
    renderEncoder.endEncoding()
}
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 阿里最新开源QwQ-32B,效果媲美deepseek-r1满血版,部署成本又又又降低了!
· AI编程工具终极对决:字节Trae VS Cursor,谁才是开发者新宠?
· 开源Multi-agent AI智能体框架aevatar.ai,欢迎大家贡献代码
· Manus重磅发布:全球首款通用AI代理技术深度解析与实战指南
· 被坑几百块钱后,我竟然真的恢复了删除的微信聊天记录!