Month 31, Day 19: NV12

1.

    // Set up a CIContext and go CIImage -> CGImage -> UIImage
    CIContext *context = [CIContext contextWithOptions:nil];
    CGImageRef cgImage = [context createCGImage:outputImage fromRect:qrRect];
    UIImage *resultImage = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage); // createCGImage returns a +1 reference; release it once the UIImage holds it
    // (Using [UIImage imageWithCIImage:outputImage] directly yields an image that is not backed by a bitmap)

 

One more point: a CIImage only becomes a bitmap image after a CIContext renders it into a CGImage. (A non-bitmap image cannot be saved to the photo album and cannot be converted to NSData as JPEG or PNG.)
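A minimal sketch of the difference (variable names follow the snippet above; on most iOS versions UIImageJPEGRepresentation of a purely CIImage-backed image returns nil):

    UIImage *ciBacked = [UIImage imageWithCIImage:outputImage];       // no bitmap behind it
    NSData *jpegBad   = UIImageJPEGRepresentation(ciBacked, 1.0);     // typically nil
    NSData *jpegGood  = UIImageJPEGRepresentation(resultImage, 1.0);  // works: backed by a CGImage
    UIImageWriteToSavedPhotosAlbum(resultImage, nil, NULL, NULL);     // saving to the album also works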

 
 
2.

Anyone who has done iOS hardware decoding knows that you must specify a PixelFormatType when creating the decoder. iOS only supports NV12, which is one flavor of YUV 4:2:0. Searching the headers for 420 turns up four constants:

kCVPixelFormatType_420YpCbCr8Planar

kCVPixelFormatType_420YpCbCr8PlanarFullRange

kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange

kCVPixelFormatType_420YpCbCr8BiPlanarFullRange

From the names alone they fall into two groups: Planar (planar, i.e. 420p) and BiPlanar (bi-planar).

Another way to tell them apart is CVPixelBufferGetPlaneCount(pixelBuffer), which returns the number of planes. kCVPixelFormatType_420YpCbCr8Planar and kCVPixelFormatType_420YpCbCr8PlanarFullRange have three planes, i.e. 420p (I420), which iOS does not support, while kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange and kCVPixelFormatType_420YpCbCr8BiPlanarFullRange have two planes. So which of the two bi-planar formats should you use?
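A small runtime check along those lines (the pixelBuffer variable is assumed to be a CVPixelBufferRef you already hold):

    size_t planeCount = CVPixelBufferGetPlaneCount(pixelBuffer);
    if (planeCount == 3) {
        // planar 420p / I420 (kCVPixelFormatType_420YpCbCr8Planar...)
    } else if (planeCount == 2) {
        // bi-planar NV12 (kCVPixelFormatType_420YpCbCr8BiPlanar...)
    }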

https://blog.csdn.net/tanningzhong/article/details/76690694

 3.
 
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange = '420v': the output video format is NV12; range: luma = [16,235], chroma = [16,240]
kCVPixelFormatType_420YpCbCr8BiPlanarFullRange = '420f': the output video format is NV12; range: luma = [0,255], chroma = [1,255]
kCVPixelFormatType_32BGRA = 'BGRA': the output format is BGRA
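For instance, this is the constant you hand to an AVCaptureVideoDataOutput (or to a decoder's destination image buffer attributes) via kCVPixelBufferPixelFormatTypeKey; a sketch, with the output object name assumed:

    #import <AVFoundation/AVFoundation.h>

    AVCaptureVideoDataOutput *videoOutput = [[AVCaptureVideoDataOutput alloc] init];
    videoOutput.videoSettings = @{
        (__bridge NSString *)kCVPixelBufferPixelFormatTypeKey :
            @(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange) // '420v', NV12 video-range
    };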


https://www.jianshu.com/p/7da76246ce82
 
 
 kCVPixelFormatType_420YpCbCr8Planar = 'y420',  
 /* Planar Component Y'CbCr 8-bit 4:2:0.  baseAddr points to a big-endian CVPlanarPixelBufferInfo_YCbCrPlanar struct */

  kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange = '420v', 
/* Bi-Planar Component Y'CbCr 8-bit 4:2:0, video-range (luma=[16,235] chroma=[16,240]).  baseAddr points to a big-endian CVPlanarPixelBufferInfo_YCbCrBiPlanar struct */

  kCVPixelFormatType_420YpCbCr8BiPlanarFullRange  = '420f', 
/* Bi-Planar Component Y'CbCr 8-bit 4:2:0, full-range (luma=[0,255] chroma=[1,255]).  baseAddr points to a big-endian CVPlanarPixelBufferInfo_YCbCrBiPlanar struct */
#YpCbCr

The Y component is Y, the U component is Cb, and the V component is Cr; in other words, this is YUV-format data.

#8-bit

Each sample uses 8 bits to store one luma (Y) value.

#4:2:0

The chroma subsampling is 4:2:0: every 2×2 block of Y samples shares one U and one V sample.

# baseAddr points to a big-endian CVPlanarPixelBufferInfo_YCbCrPlanar struct

The addresses of the YUV planes are described by a CVPlanarPixelBufferInfo_YCbCrPlanar struct stored in big-endian form at baseAddr.

#Planar & Bi-Planar

The former is the Planar layout, the latter the BiPlanar layout.
Planar means the Y, U and V components are each packed separately and stored one after another in a single buffer, i.e. YYYY...U...V..., which is I420.
BiPlanar means luma and chroma are split into two planes: Y packed by itself and U/V interleaved together, i.e. YYYY...UVUV..., which is NV12.
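As a concrete sizing example for a width × height 4:2:0 frame (the plane sizes follow directly from the subsampling; variable names are assumed):

    size_t ySize  = width * height;        // Y plane: 1 byte per pixel
    size_t uvSize = width * height / 2;    // NV12: one interleaved UVUV... plane
    size_t uSize  = width * height / 4;    // I420: separate U plane (the V plane has the same size)
    // Either layout totals width * height * 3 / 2 bytes, matching the malloc in the code below.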

#VideoRange & FullRange

Luma and chroma values are stored in 8 bits, so the raw range is 2^8 = 256 levels, i.e. [0,255].
VideoRange limits luma to [16,235] (and chroma to [16,240]).
FullRange uses the full [0,255] range.
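If you ever need to stretch video-range luma to full range, the mapping is linear over those intervals; a sketch with a hypothetical helper name:

    // Hypothetical helper: map video-range luma [16,235] onto full-range [0,255].
    static inline uint8_t FullRangeLuma(uint8_t videoLuma) {
        int y = ((int)videoLuma - 16) * 255 / 219;   // 219 = 235 - 16
        return (uint8_t)(y < 0 ? 0 : (y > 255 ? 255 : y));
    }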

#Inspecting the capture format

To inspect what the capture is actually delivering:

CMSampleBufferGetFormatDescription(sampleBuffer);
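Beyond logging the description, CMFormatDescriptionGetMediaSubType gives back the FourCC directly, so the delivered format can be checked in code (names assumed):

    CMFormatDescriptionRef desc = CMSampleBufferGetFormatDescription(sampleBuffer);
    FourCharCode subType = CMFormatDescriptionGetMediaSubType(desc);
    if (subType == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange) {
        // '420v' - NV12, video-range
    } else if (subType == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
        // '420f' - NV12, full-range
    }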
#Getting the YUV data out of a captured CMSampleBufferRef

Convert it to a CVImageBufferRef:

CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);

Get the width and height:

CVPixelBufferGetWidth(pixelBuffer);
CVPixelBufferGetHeight(pixelBuffer);

Get the base address of each YUV plane:

CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, Plane_index);
// Plane_index here depends on the planar layout discussed above
In the planar case the data is laid out contiguously (Y, then U, then V), so one address covers everything.
In the bi-planar case two calls are needed: Plane_index = 0 returns the Y plane and Plane_index = 1 returns the interleaved UV plane.
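Each plane also has its own dimensions and row stride; for the UV plane of an NV12 buffer the height is half the luma height, and bytes-per-row may include padding. A small sketch (names assumed):

    size_t yWidth   = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0);
    size_t yHeight  = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0);
    size_t uvHeight = CVPixelBufferGetHeightOfPlane(pixelBuffer, 1);      // = yHeight / 2 for 4:2:0
    size_t yStride  = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0); // may be larger than yWidth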
#Caveats

Remember to lock the pixelBuffer while touching its memory:

    CVPixelBufferLockBaseAddress(pixelBuffer, lockFlag);
    // access the pixel data here
    CVPixelBufferUnlockBaseAddress(pixelBuffer, lockFlag);
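If the buffer is only read (as in the copy below), kCVPixelBufferLock_ReadOnly can be passed as the flag; the same flag must be used for both the lock and the unlock:

    CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
    // read-only access here
    CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);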

 

    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    
    // Get the Y plane of the CVImageBufferRef
    UInt8 *pY = (UInt8 *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
    // Get the interleaved UV plane of the CVImageBufferRef
    UInt8 *pUV = (UInt8 *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
    
    size_t width = CVPixelBufferGetWidth(pixelBuffer);
    size_t height = CVPixelBufferGetHeight(pixelBuffer);
    size_t pYBytes = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
    size_t pUVBytes = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);

    // Allocate an I420 (YUV420P) staging buffer: Y plane followed by U and V planes
    UInt8 *pYUV420P = (UInt8 *)malloc(width * height * 3 / 2);
    UInt8 *pU = pYUV420P + (width * height);
    UInt8 *pV = pU + (width * height / 4);
    
    // Copy the Y plane row by row, dropping any per-row padding (pYBytes may exceed width)
    for(int i = 0; i < height; i++) {
        memcpy(pYUV420P + i * width, pY + i * pYBytes, width);
    }
    
    // De-interleave the NV12 UV plane into separate U and V planes
    for(int j = 0; j < height / 2; j++) {
        for(int i = 0; i < width / 2; i++) {
            *(pU++) = pUV[i<<1];
            *(pV++) = pUV[(i<<1) + 1];
        }
        
        pUV += pUVBytes;
    }
    
    CVPixelBufferRef getCroppedPixelBuffer = [self copyDataFromBuffer:pYUV420P toYUVPixelBufferWithWidth:width Height:height];

    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    free(pYUV420P); // the I420 staging buffer is no longer needed once it has been copied out


- (CVPixelBufferRef) copyDataFromBuffer:(const unsigned char*)buffer toYUVPixelBufferWithWidth:(size_t)width Height:(size_t)height
{
    
    uint8_t *nv12 = (uint8_t *)buffer; // despite the name, the incoming data is I420 (Y, then U, then V)
    
    NSDictionary *pixelAttributes = @{(__bridge NSString *)kCVPixelBufferIOSurfacePropertiesKey : @{}};
    CVPixelBufferRef pixelBuffer = NULL;
    CVReturn result = CVPixelBufferCreate(kCFAllocatorDefault,
                                          width,
                                          height,
                                          kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,
                                          (__bridge CFDictionaryRef)(pixelAttributes),
                                          &pixelBuffer);
    if (result != kCVReturnSuccess) {
        NSLog(@"Unable to create cvpixelbuffer %d", result);
        return NULL;
    }
    
    size_t pYBytes = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
    size_t pUVBytes = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);
    
    CVPixelBufferLockBaseAddress(pixelBuffer,0);
    unsigned char *yDestPlane = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
    unsigned char *y_ch0 = nv12;
    unsigned char *y_ch1 = nv12 + width * height;
//    memcpy(yDestPlane, y_ch0, width * height);
    for(int i = 0; i < height; i++) {
        memcpy(yDestPlane + i * pYBytes, y_ch0 + i * width, width);
    }
    unsigned char *uvDestPlane = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
//    memcpy(uvDestPlane, y_ch1, width * height/2);
    
    UInt8 *pU = nv12 + (width * height);
    UInt8 *pV = pU + (width * height / 4);
    
    // Re-interleave the separate U and V planes back into the NV12 UV plane
    for(int j = 0; j < height / 2; j++) {
        for(int i = 0; i < width / 2; i++) {
            uvDestPlane[i<<1] = *(pU++) ;
            uvDestPlane[(i<<1) + 1] = *(pV++);
        }
        
        uvDestPlane += pUVBytes;
    }
    
    //    if (nv12) {
    //        free(nv12);
    //    }
    
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    

    return pixelBuffer;
    
}
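One usage note: CVPixelBufferCreate hands back a +1 retained buffer, so the caller of copyDataFromBuffer owns the result and should release it when finished; a sketch:

    CVPixelBufferRef cropped = [self copyDataFromBuffer:pYUV420P
                              toYUVPixelBufferWithWidth:width
                                                 Height:height];
    // ... use the buffer (encode, display, etc.) ...
    if (cropped) {
        CVPixelBufferRelease(cropped);
    }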

4.

/**
 * Converts a CMSampleBufferRef into a UIImage; adapted from:
 * https://stackoverflow.com/questions/19310437/convert-cmsamplebufferref-to-uiimage-with-yuv-color-space
 * note1: the SDK requires colorSpace to be CGColorSpaceCreateDeviceRGB
 * note2: the SDK expects ARGB-format images
 */
- (UIImage *) imageFromSamplePlanerPixelBuffer:(CMSampleBufferRef)sampleBuffer{
    @autoreleasepool {
        CMFormatDescriptionRef desc = CMSampleBufferGetFormatDescription(sampleBuffer);
        NSLog(@">>%@",desc);
        
        // Get a CMSampleBuffer's Core Video image buffer for the media data
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        // Lock the base address of the pixel buffer
        CVPixelBufferLockBaseAddress(imageBuffer, 0);
        
        // Get the base address of the first plane
        void *baseAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
        
        // Get the number of bytes per row for the plane pixel buffer
        size_t bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer,0);
        // Get the pixel buffer width and height
        size_t width = CVPixelBufferGetWidth(imageBuffer);
        size_t height = CVPixelBufferGetHeight(imageBuffer);
        
//        uint8_t *rgbabuffer = baseAddress;
//        for (int y=0; y<100; y++) {
//            for (int x=0; x<width;x++) {
//                rgbabuffer[y*bytesPerRow+x*4+0] = 0;
//                rgbabuffer[y*bytesPerRow+x*4+1] = 0;
//                rgbabuffer[y*bytesPerRow+x*4+2] = 255;
//                rgbabuffer[y*bytesPerRow+x*4+3] = 1;
//            }
//        }
        
        // Create a device-dependent RGB color space
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        
        // Create a bitmap graphics context with the sample buffer data
        CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
                                                     bytesPerRow, colorSpace, kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little);
        // Create a Quartz image from the pixel data in the bitmap graphics context
        CGImageRef quartzImage = CGBitmapContextCreateImage(context);
        // Unlock the pixel buffer
        CVPixelBufferUnlockBaseAddress(imageBuffer,0);
        
        // Free up the context and color space
        CGContextRelease(context);
        CGColorSpaceRelease(colorSpace);
        
        // Create an image object from the Quartz image
        UIImage *image = [UIImage imageWithCGImage:quartzImage];
        
        // Release the Quartz image
        CGImageRelease(quartzImage);
        return (image);
    }
}
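Note that this path feeds plane 0 straight into a BGRA bitmap context (kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little), so it only makes sense when the sample buffer was delivered as kCVPixelFormatType_32BGRA; a hedged sanity check before calling it:

    OSType fmt = CVPixelBufferGetPixelFormatType(CMSampleBufferGetImageBuffer(sampleBuffer));
    if (fmt != kCVPixelFormatType_32BGRA) {
        // a YUV (NV12) buffer would be misinterpreted by the BGRA bitmap-context path above
    }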

posted @ 2019-04-19 11:08 lianhuaren