FFmpeg's internal YUV-to-RGB conversion

ffmpeg 4.2 is used as the reference version.

1. Building the YUV-to-RGB lookup tables

The tables are set up by ff_yuv2rgb_c_init_tables() in libswscale/yuv2rgb.c. The per-colorspace conversion coefficients it works from are declared in the same file:

/* Color space conversion coefficients for YCbCr -> RGB mapping.
 *
 * Entries are {crv, cbu, cgu, cgv}
 *
 *   crv = (255 / 224) * 65536 * (1 - cr) / 0.5
 *   cbu = (255 / 224) * 65536 * (1 - cb) / 0.5
 *   cgu = (255 / 224) * 65536 * (cb / cg) * (1 - cb) / 0.5
 *   cgv = (255 / 224) * 65536 * (cr / cg) * (1 - cr) / 0.5
 *
 * where Y = cr * R + cg * G + cb * B and cr + cg + cb = 1.
 */
const int32_t ff_yuv2rgb_coeffs[11][4] = {
    { 117489, 138438, 13975, 34925 },   /* ITU-R Rec. 709 */
    /* ... remaining colorspace rows omitted ... */
};
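
As a quick check of the formulas in that comment, the standalone snippet below (not part of FFmpeg) recomputes the four entries from the BT.709 luma weights cr = 0.2126, cg = 0.7152, cb = 0.0722. The results land within a few units of the stored row; the small differences come from rounding in the original constants.

#include <stdio.h>
#include <math.h>

int main(void)
{
    const double cr = 0.2126, cg = 0.7152, cb = 0.0722;  /* cr + cg + cb = 1 */
    const double s  = (255.0 / 224.0) * 65536.0;         /* range factor * 16.16 scale */

    long crv = lround(s * (1.0 - cr) / 0.5);
    long cbu = lround(s * (1.0 - cb) / 0.5);
    long cgu = lround(s * (cb / cg) * (1.0 - cb) / 0.5);
    long cgv = lround(s * (cr / cg) * (1.0 - cr) / 0.5);

    /* prints values close to { 117489, 138438, 13975, 34925 } */
    printf("crv=%ld cbu=%ld cgu=%ld cgv=%ld\n", crv, cbu, cgu, cgv);
    return 0;
}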

    
The coefficient setup at the top of ff_yuv2rgb_c_init_tables() (excerpt):

const int bpp = c->dstFormatBpp;
uint8_t  *y_table;
uint16_t *y_table16;
uint32_t *y_table32;
int i, base, rbase, gbase, bbase, av_uninit(abase), needAlpha;
const int yoffs = (fullRange ? 384 : 326) + YUVRGB_TABLE_LUMA_HEADROOM;
const int table_plane_size = 1024 + 2 * YUVRGB_TABLE_LUMA_HEADROOM;

int64_t crv =  inv_table[0];
int64_t cbu =  inv_table[1];
int64_t cgu = -inv_table[2];
int64_t cgv = -inv_table[3];
int64_t cy  = 1 << 16;   /* luma gain, 16.16 fixed point */
int64_t oy  = 0;         /* luma offset */
int64_t yb  = 0;

if (!fullRange) {
    /* limited-range input: stretch Y from 16..235 to full range */
    cy = (cy * 255) / 219;
    oy = 16 << 16;
} else {
    /* full-range input: the inv_table values assume limited-range chroma, undo that */
    crv = (crv * 224) / 255;
    cbu = (cbu * 224) / 255;
    cgu = (cgu * 224) / 255;
    cgv = (cgv * 224) / 255;
}

/* fold user brightness/contrast/saturation into the coefficients */
cy   = (cy  * contrast) >> 16;
crv  = (crv * contrast * saturation) >> 32;
cbu  = (cbu * contrast * saturation) >> 32;
cgu  = (cgu * contrast * saturation) >> 32;
cgv  = (cgv * contrast * saturation) >> 32;
oy  -= 256 * brightness;

/* coefficients replicated into four 16-bit lanes (used by the asm/SIMD paths) */
c->uOffset = 0x0400040004000400LL;
c->vOffset = 0x0400040004000400LL;
c->yCoeff  = roundToInt16(cy  * (1 << 13)) * 0x0001000100010001ULL;
c->vrCoeff = roundToInt16(crv * (1 << 13)) * 0x0001000100010001ULL;
c->ubCoeff = roundToInt16(cbu * (1 << 13)) * 0x0001000100010001ULL;
c->vgCoeff = roundToInt16(cgv * (1 << 13)) * 0x0001000100010001ULL;
c->ugCoeff = roundToInt16(cgu * (1 << 13)) * 0x0001000100010001ULL;
c->yOffset = roundToInt16(oy  * (1 <<  3)) * 0x0001000100010001ULL;

/* scalar int16_t copies of the same coefficients */
c->yuv2rgb_y_coeff   = (int16_t)roundToInt16(cy  * (1 << 13));
c->yuv2rgb_y_offset  = (int16_t)roundToInt16(oy  * (1 <<  9));
c->yuv2rgb_v2r_coeff = (int16_t)roundToInt16(crv * (1 << 13));
c->yuv2rgb_v2g_coeff = (int16_t)roundToInt16(cgv * (1 << 13));
c->yuv2rgb_u2g_coeff = (int16_t)roundToInt16(cgu * (1 << 13));
c->yuv2rgb_u2b_coeff = (int16_t)roundToInt16(cbu * (1 << 13));

// scale coefficients by cy (the chroma terms can then act as index offsets into the luma-indexed tables built below)
crv = ((crv * (1 << 16)) + 0x8000) / FFMAX(cy, 1);
cbu = ((cbu * (1 << 16)) + 0x8000) / FFMAX(cy, 1);
cgu = ((cgu * (1 << 16)) + 0x8000) / FFMAX(cy, 1);
cgv = ((cgv * (1 << 16)) + 0x8000) / FFMAX(cy, 1);

av_freep(&c->yuvTable);
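
Stripped of the table machinery, these coefficients encode the familiar per-pixel math. A minimal sketch (not FFmpeg code; limited-range BT.601 input assumed, no dithering; the constants are the 16.16 fixed-point forms of 1.164 / 1.596 / 2.017 / 0.392 / 0.813 and match the BT.601 row of ff_yuv2rgb_coeffs):

#include <stdint.h>

static uint8_t clip8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

/* Direct per-pixel YCbCr -> RGB for limited-range BT.601 input.
 * swscale never evaluates this per pixel; it bakes the same arithmetic
 * into the lookup tables filled later in ff_yuv2rgb_c_init_tables(). */
static void ycbcr_to_rgb(uint8_t y, uint8_t u, uint8_t v,
                         uint8_t *r, uint8_t *g, uint8_t *b)
{
    const int cy  = 76309;    /* (255 << 16) / 219, the !fullRange luma gain */
    const int crv = 104597;   /* 1.596 */
    const int cbu = 132201;   /* 2.017 */
    const int cgu = 25675;    /* 0.392 */
    const int cgv = 53279;    /* 0.813 */

    int y1 = cy * (y - 16) + (1 << 15);   /* luma term plus rounding */
    *r = clip8((y1 + crv * (v - 128)) >> 16);
    *g = clip8((y1 - cgu * (u - 128) - cgv * (v - 128)) >> 16);
    *b = clip8((y1 + cbu * (u - 128)) >> 16);
}

Section 2 shows how the same arithmetic is turned into three table lookups per pixel.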

 

2. The actual conversion
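
From the caller's side, everything below is reached through the public libswscale API. A minimal caller-side sketch (illustrative only: the function name yuv420p_to_bgr24 and the buffer names are made up, error handling is stripped):

#include <libswscale/swscale.h>

static int yuv420p_to_bgr24(const uint8_t *const src_data[4], const int src_linesize[4],
                            int w, int h,
                            uint8_t *const dst_data[4], const int dst_linesize[4])
{
    struct SwsContext *sws = sws_getContext(w, h, AV_PIX_FMT_YUV420P,
                                            w, h, AV_PIX_FMT_BGR24,
                                            SWS_POINT, NULL, NULL, NULL);
    if (!sws)
        return -1;

    /* this call ends up in the internal path traced below */
    int ret = sws_scale(sws, src_data, src_linesize, 0, h, dst_data, dst_linesize);

    sws_freeContext(sws);
    return ret;
}

Since source and destination have the same size, no scaling is involved and sws_scale() can go straight to a format-conversion path like the one traced below.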

Inside sws_scale(), the work is dispatched through the function pointer stored in the context:

    ret = c->swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH, dst2, dstStride2);  // FFmpeg-master/libswscale/swscale.c:969

For an unscaled YUV420P-to-BGR24 conversion this pointer ends up at yuv2rgb_c_24_bgr, one of the converters generated by the YUV2RGBFUNC macro (libswscale/yuv2rgb.c:378). The macro's loop walks two luma rows against one shared chroma row; each LOADCHROMA(i) fetches one (U, V) pair and each PUTBGR24(dst, py, i) writes the two horizontally adjacent pixels that share it:
// only trivial mods from yuv2rgb_c_24_rgb
YUV2RGBFUNC(yuv2rgb_c_24_bgr, uint8_t, 0)
    LOADCHROMA(0);
    PUTBGR24(dst_1, py_1, 0);   /* the 2 pixels of the first luma row  */
    PUTBGR24(dst_2, py_2, 0);   /* the 2 pixels of the second luma row */

    LOADCHROMA(1);
    PUTBGR24(dst_2, py_2, 1);
    PUTBGR24(dst_1, py_1, 1);

    LOADCHROMA(2);
    PUTBGR24(dst_1, py_1, 2);
    PUTBGR24(dst_2, py_2, 2);

    LOADCHROMA(3);
    PUTBGR24(dst_2, py_2, 3);
    PUTBGR24(dst_1, py_1, 3);

 

The per-pixel work is pure table lookup: LOADCHROMA(i) turns one (U, V) pair into three table pointers r, g and b (built in step 1), and PUTRGB (PUTBGR24 works the same way, writing the three bytes separately) then indexes each of them with the luma value:

#define LOADCHROMA(i)                               \
    U = pu[i];                                      \
    V = pv[i];                                      \
    r = (void *)c->table_rV[V+YUVRGB_TABLE_HEADROOM];                                           \
    g = (void *)(c->table_gU[U+YUVRGB_TABLE_HEADROOM] + c->table_gV[V+YUVRGB_TABLE_HEADROOM]);  \
    b = (void *)c->table_bU[U+YUVRGB_TABLE_HEADROOM];
  
#define PUTRGB(dst, src, i)                         \
    Y              = src[2 * i];                    \
    dst[2 * i]     = r[Y] + g[Y] + b[Y];            \
    Y              = src[2 * i + 1];                \
    dst[2 * i + 1] = r[Y] + g[Y] + b[Y];
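
To make the table trick concrete, here is a self-contained sketch of the same idea for an RGB32-style output (not FFmpeg code; it assumes BT.601 coefficients, limited-range input, no dithering, a simplified headroom scheme, and all names are made up). Each luma-indexed table holds the clipped component already shifted into its byte lane, and the chroma contribution, divided by cy exactly as in the init code above, becomes a pointer/index offset into those tables, so one pixel costs three loads and two additions.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* 16.16 fixed-point BT.601 limited-range coefficients (illustrative values) */
#define CY   76309   /* 1.164 */
#define CRV 104597   /* 1.596 */
#define CBU 132201   /* 2.017 */
#define CGU  25675   /* 0.392 */
#define CGV  53279   /* 0.813 */

#define HEADROOM   256                    /* covers the worst-case chroma index offset */
#define TABLE_SIZE (256 + 2 * HEADROOM)

static uint32_t rtab[TABLE_SIZE], gtab[TABLE_SIZE], btab[TABLE_SIZE];
static const uint32_t *rv_ptr[256];       /* red  table base, per V value   */
static const uint32_t *gu_ptr[256];       /* green table base, per U value  */
static int              gv_off[256];      /* extra green index, per V value */
static const uint32_t *bu_ptr[256];       /* blue table base, per U value   */

static uint8_t clip8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

static void init_tables(void)
{
    /* luma-indexed tables: entry i holds the clipped component for an
     * "adjusted luma" of (i - HEADROOM), shifted into its 0x00RRGGBB lane */
    for (int i = 0; i < TABLE_SIZE; i++) {
        uint8_t p = clip8((CY * (i - HEADROOM - 16) + (1 << 15)) >> 16);
        rtab[i] = (uint32_t)p << 16;
        gtab[i] = (uint32_t)p << 8;
        btab[i] = p;
    }
    /* chroma-indexed bases/offsets: the chroma term divided by cy becomes an
     * index offset into the luma tables (same role as table_rV/gU/gV/bU) */
    for (int c = 0; c < 256; c++) {
        rv_ptr[c] = rtab + HEADROOM + (CRV * (c - 128) + CY / 2) / CY;
        bu_ptr[c] = btab + HEADROOM + (CBU * (c - 128) + CY / 2) / CY;
        gu_ptr[c] = gtab + HEADROOM - (CGU * (c - 128) + CY / 2) / CY;
        gv_off[c] =                 - (CGV * (c - 128) + CY / 2) / CY;
    }
}

/* mirror of LOADCHROMA + PUTRGB for one 2x2 block of 4:2:0 input */
static void put_2x2(const uint8_t *py_1, const uint8_t *py_2, uint8_t u, uint8_t v,
                    uint32_t *dst_1, uint32_t *dst_2)
{
    const uint32_t *r = rv_ptr[v];                 /* like table_rV[V]               */
    const uint32_t *g = gu_ptr[u] + gv_off[v];     /* like table_gU[U] + table_gV[V] */
    const uint32_t *b = bu_ptr[u];                 /* like table_bU[U]               */

    dst_1[0] = r[py_1[0]] + g[py_1[0]] + b[py_1[0]];
    dst_1[1] = r[py_1[1]] + g[py_1[1]] + b[py_1[1]];
    dst_2[0] = r[py_2[0]] + g[py_2[0]] + b[py_2[0]];
    dst_2[1] = r[py_2[1]] + g[py_2[1]] + b[py_2[1]];
}

int main(void)
{
    init_tables();
    const uint8_t y_1[2] = { 81, 90 }, y_2[2] = { 100, 120 };
    uint32_t d_1[2], d_2[2];
    put_2x2(y_1, y_2, 90, 240, d_1, d_2);          /* chroma of a strong red */
    printf("%08"PRIx32" %08"PRIx32" / %08"PRIx32" %08"PRIx32"\n",
           d_1[0], d_1[1], d_2[0], d_2[1]);
    return 0;
}

FFmpeg's real tables additionally handle dithering, alpha and many output depths, but the shape of table_rV / table_gU / table_gV / table_bU is the same.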