HiSilicon Hi3519: Qt + FFmpeg software decoding and playback of AVI

On the HiSilicon Hi3519, FFmpeg is used under Qt to software-decode an AVI file and display it. The FFmpeg and Qt configuration was covered in earlier posts and is not repeated here.

Decoding

Decoding runs in a separate thread. The code is as follows:

void VideoPlayer::run()
{
    AVFormatContext *fmt_ctx = NULL;
    AVCodecContext *dec_ctx = NULL;
    AVFrame *pf = av_frame_alloc();
    AVFrame *pfc = av_frame_alloc();
    int video_stream_index;
    int width, height;

    av_register_all();

    video_stream_index = getStream(&fmt_ctx, &dec_ctx, "./source_file/test.avi");
    if (video_stream_index < 0) {   // open or stream lookup failed, nothing to decode
        av_frame_free(&pf);
        av_frame_free(&pfc);
        return;
    }

    decoder(&fmt_ctx, &dec_ctx, video_stream_index, pf, pfc, &width, &height);
}


int VideoPlayer::decoder(AVFormatContext** fmt_ctx, AVCodecContext** dec_ctx,
                         int video_stream_index, AVFrame *pFrame, AVFrame *pFrameColor, int* width, int* height)
{
    AVPacket packet;
    int frameFinished;
    uint8_t *buffer;
    int numBytes;
    struct SwsContext *img_convert_ctx = NULL;

    *width = (*dec_ctx)->width;
    *height = (*dec_ctx)->height;

    /* allocate the RGB24 buffer once and attach it to pFrameColor */
    numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, (*dec_ctx)->width, (*dec_ctx)->height);
    buffer = (uint8_t*)av_malloc(numBytes);
    avpicture_fill((AVPicture *)pFrameColor, buffer, AV_PIX_FMT_RGB24, (*dec_ctx)->width, (*dec_ctx)->height);

    while (av_read_frame(*fmt_ctx, &packet) >= 0) {
        if (packet.stream_index == video_stream_index) {
            avcodec_decode_video2(*dec_ctx, pFrame, &frameFinished, &packet);
            if (frameFinished)
            {
                /* reuse one conversion context across frames instead of
                   allocating (and leaking) a new one per decoded picture */
                img_convert_ctx = sws_getCachedContext(img_convert_ctx, (*dec_ctx)->width,
                                                       (*dec_ctx)->height, (*dec_ctx)->pix_fmt,
                                                       (*dec_ctx)->width, (*dec_ctx)->height,
                                                       AV_PIX_FMT_RGB24, SWS_BICUBIC,
                                                       NULL, NULL, NULL);
                if (!img_convert_ctx) {
                    fprintf(stderr, "Cannot initialize sws conversion context\n");
                    exit(1);
                }

                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data,
                          pFrame->linesize, 0, (*dec_ctx)->height, pFrameColor->data,
                          pFrameColor->linesize);

                /* wrap the RGB buffer in a QImage (passing the stride explicitly)
                   and hand a deep copy to the GUI thread */
                QImage tmpImg((uchar *)buffer, (*dec_ctx)->width, (*dec_ctx)->height,
                              pFrameColor->linesize[0], QImage::Format_RGB888);
                QImage image = tmpImg.copy();
                emit sig_GetOneFrame(image);
            }
        }
        av_free_packet(&packet);
    }
    printf("finished\n");

    sws_freeContext(img_convert_ctx);
    av_free(buffer);
    av_frame_free(&pFrameColor);
    av_frame_free(&pFrame);
    avcodec_close(*dec_ctx);
    avformat_close_input(fmt_ctx);

    return 0;
}


int VideoPlayer::getStream(AVFormatContext **fmt_ctx, AVCodecContext **dec_ctx, const char* file_name)
{
    int video_stream_index = -1;
    int ret;
    bool decoder_init = false;

    if ((ret = avformat_open_input(fmt_ctx, file_name, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "fail to open input file.\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(*fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "fail to find stream information.\n");
        avformat_close_input(fmt_ctx);
        return ret;
    }

    for (unsigned int i = 0; i < (*fmt_ctx)->nb_streams; i++) {
        if ((*fmt_ctx)->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;

            if (!decoder_init) {
                *dec_ctx = (*fmt_ctx)->streams[i]->codec;

                AVCodec *cod = avcodec_find_decoder((*dec_ctx)->codec_id);

                if (!cod) {
                    av_log(NULL, AV_LOG_ERROR, "fail to find decoder.\n");
                    avformat_close_input(fmt_ctx);
                    return -1;
                }
                if (avcodec_open2(*dec_ctx, cod, NULL) != 0) {
                    av_log(NULL, AV_LOG_ERROR, "fail to open decoder.\n");
                    avformat_close_input(fmt_ctx);
                    return -1;
                }
                decoder_init = true;
            }
            break;  /* use the first video stream found */
        }
    }

    return video_stream_index;  /* -1 if no video stream was found */
}
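
The class declaration itself is not part of the post. As a point of reference, a minimal sketch of what it could look like is shown below; the base class (QThread), the include layout, and the constructor are assumptions inferred from the use of run() and the sig_GetOneFrame signal.

// Hypothetical videoplayer.h -- a minimal sketch, assuming VideoPlayer
// subclasses QThread and emits one QImage per decoded frame.
#ifndef VIDEOPLAYER_H
#define VIDEOPLAYER_H

#include <QThread>
#include <QImage>

extern "C" {            // FFmpeg is a C library
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}

class VideoPlayer : public QThread
{
    Q_OBJECT
public:
    explicit VideoPlayer(QObject *parent = 0) : QThread(parent) {}

signals:
    void sig_GetOneFrame(QImage image);   // emitted once per decoded frame

protected:
    void run();                           // decoding loop, runs in its own thread

private:
    int getStream(AVFormatContext **fmt_ctx, AVCodecContext **dec_ctx, const char *file_name);
    int decoder(AVFormatContext **fmt_ctx, AVCodecContext **dec_ctx,
                int video_stream_index, AVFrame *pFrame, AVFrame *pFrameColor,
                int *width, int *height);
};

#endif // VIDEOPLAYER_H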

After decoding, each frame is converted to RGB and then wrapped in a QImage; a signal is emitted for every decoded frame to update the displayed image:

QImage tmpImg((uchar *)buffer, (*dec_ctx)->width, (*dec_ctx)->height,
              pFrameColor->linesize[0], QImage::Format_RGB888);
QImage image = tmpImg.copy();
emit sig_GetOneFrame(image);
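
On the receiving side this signal has to be connected to the main window's update slot. The wiring is not shown in the post; a sketch of how it might be done in the MainWindow constructor, assuming a VideoPlayer* member named mPlayer, is:

// Hypothetical wiring in the MainWindow constructor (mPlayer is an assumed
// VideoPlayer* member). The connection crosses threads, so a queued
// connection delivers the QImage in the GUI thread.
mPlayer = new VideoPlayer(this);
connect(mPlayer, SIGNAL(sig_GetOneFrame(QImage)),
        this, SLOT(slotGetOneFrame(QImage)), Qt::QueuedConnection);
mPlayer->start();   // starts the thread, which calls VideoPlayer::run()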

Display

The display code is as follows:


void MainWindow::paintEvent(QPaintEvent *event)
{
    QPainter painter(this);
    painter.setBrush(Qt::black);
    painter.drawRect(0, 0, this->width(), this->height()); // paint the whole widget black first

    if (mImage.size().width() <= 0) return;


    QImage img = mImage.scaled(this->size(),Qt::KeepAspectRatio);

    int x = this->width() - img.width();
    int y = this->height() - img.height();

    x /= 2;
    y /= 2;

    painter.drawImage(QPoint(x,y),img); // draw the scaled frame centered

}

void MainWindow::slotGetOneFrame(QImage img)
{
    mImage = img;
    update();
}
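
The MainWindow header is likewise not included in the post. A minimal sketch consistent with the code above might look like this; the VideoPlayer* member is an assumption carried over from the connection sketch.

// Hypothetical mainwindow.h -- a minimal sketch matching the slots and
// members used above; the real header from the post is not shown.
#ifndef MAINWINDOW_H
#define MAINWINDOW_H

#include <QMainWindow>
#include <QImage>

class VideoPlayer;

class MainWindow : public QMainWindow
{
    Q_OBJECT
public:
    explicit MainWindow(QWidget *parent = 0);

protected:
    void paintEvent(QPaintEvent *event);   // draws mImage centered on a black background

public slots:
    void slotGetOneFrame(QImage img);      // stores the frame and schedules a repaint

private:
    QImage mImage;            // last decoded frame
    VideoPlayer *mPlayer;     // assumed decoder-thread member
};

#endif // MAINWINDOW_H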

Summary:

After compilation the program runs successfully on the board, but the display is noticeably slow. I did not analyze the exact cause any further, because I later switched to hardware decoding and dropped the investigation. Reference code available.

posted @ 2019-06-21 16:55  youngliu91