FFmpeg: adding support for decoding video files
I am building a data channel with two requirements:
1. Open a live stream and decode it into images.
2. Open a video file and get the decoded images.
The first requirement had already been implemented by a predecessor:
 1 bool FfmpegStreamChr::Open(const char *pstrFilename)
 2 {
 3     Close();
 4     avformat_network_init();
 5     av_register_all();
 6
 7
 8     std::string tempfile = pstrFilename;
 9     AVDictionary* options = NULL;
10     if (tempfile.size() > 10)
11     {
12         if (memcmp(tempfile.c_str() + tempfile.size() - 4, "#tcp", 4) == 0)
13         {
14             av_dict_set(&options, "rtsp_transport", "tcp", 0);
15             tempfile.erase(tempfile.size() - 4);
16         }
17     }
18
19     //format_context_ = avformat_alloc_cotext();
20
21     av_dict_set(&options, "stimeout", "2000000", 0);
22     if (avformat_open_input(&format_context_, tempfile.c_str(), NULL, &options) < 0)
23         return false;
24
25     if (avformat_find_stream_info(format_context_, nullptr) < 0)
26         return false;
27
28     av_dump_format(format_context_, 0, tempfile.c_str(), 0);
29
30     video_stream_index_ = -1;
31
32     pts_first = true;
33     duration = format_context_->duration / AV_TIME_BASE;
34
35     int video_stream_idx = av_find_best_stream(format_context_, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
36     video_st = format_context_->streams[video_stream_idx];
37
38
39     for (unsigned int i = 0; i < format_context_->nb_streams; i++)
40     {
41         AVCodecParameters *enc = format_context_->streams[i]->codecpar;
42         if (!enc)
43             continue;
44         if (AVMEDIA_TYPE_VIDEO == enc->codec_type && video_stream_index_ < 0)
45         {
46             width_ = enc->width;
47             height_ = enc->height;
48
49             codec_ = avcodec_find_decoder(enc->codec_id);
50             if (!codec_)
51                 return false;
52             codec_context_ = avcodec_alloc_context3(codec_);
53             if (!codec_context_)
54                 return false;
55             if (avcodec_open2(codec_context_, codec_, NULL) < 0)
56             {
57                 avcodec_free_context(&codec_context_);
58                 return false;
59             }
60
61             if (width_ && (enc->width != width_))
62                 enc->width = width_;
63
64             if (height_ && (enc->height != height_))
65
66                 enc->height = height_;
67
68             video_stream_index_ = i;
69         }
70     }
71     if (video_stream_index_ == -1)
72         return false;
73
74     yuv_frame_ = av_frame_alloc();
75     memset(rgb_data_, 0, sizeof(uint8_t *) * 8);
76     memset(rgb_line_size_, 0, sizeof(int) * 8);
77     rgb_line_size_[0] = 3 * width_;
78     DecodeToImage();
79     return true;
80 }
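For reference, Open() takes either an RTSP URL, optionally suffixed with "#tcp" to force rtsp_transport=tcp, or a plain file path. A hypothetical call site might look like this (host, port and path are placeholders, not from the project):

// Hypothetical usage; FfmpegStreamChr and its members come from the surrounding project.
FfmpegStreamChr channel;

// Live stream: the trailing "#tcp" is stripped inside Open() and mapped to
// av_dict_set(&options, "rtsp_transport", "tcp", 0).
bool stream_ok = channel.Open("rtsp://camera-host:554/stream1#tcp");

// Video file: the path is passed to avformat_open_input() unchanged.
bool file_ok = channel.Open("/path/to/video.mp4");

The decode loop that converts frames and pushes them downstream via SendToAll() is DecodeToImage():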
bool FfmpegStreamChr::DecodeToImage()
{
    if (!format_context_ || !codec_context_)
        return false;

    int count_no_video_stream = 0;
    int continue_counts = 0;
    const int max_number_of_attempts = 250;
    // const int max_number_of_video_stream_attempts = 1 << 16;

    for (;;)
    {
        TempImg = cv::Mat();
        if (continue_counts > max_number_of_attempts)
            return false;

        // opt_time = GetTickCount();
        int ret = av_read_frame(format_context_, &packet_);
        if (ret == AVERROR(EAGAIN))
        {
            ++continue_counts;
            continue;
        }
        if (ret < 0)
            continue;

        if (packet_.stream_index != video_stream_index_)
        {
            // count_no_video_stream++;
            // if (count_no_video_stream > max_number_of_video_stream_attempts)
            //     return false;
            av_packet_unref(&packet_);
            TempImg.release();
            continue;
        }

        // std::chrono::system_clock::time_point t1 = std::chrono::system_clock::now();
        ret = avcodec_send_packet(codec_context_, &packet_);
        if (avcodec_receive_frame(codec_context_, yuv_frame_) == 0)
        {
            // std::chrono::system_clock::time_point t2 = std::chrono::system_clock::now();
            if (!y2r_sws_context_)
            {
                y2r_sws_context_ = sws_getContext(width_, height_, codec_context_->pix_fmt,
                                                  codec_context_->width, codec_context_->height,
                                                  /*video_stream_->codec->pix_fmt*/ AV_PIX_FMT_BGR24,
                                                  /*SWS_BICUBIC*/ SWS_BILINEAR, NULL, NULL, NULL);
                if (!y2r_sws_context_)
                {
                    av_packet_unref(&packet_);
                    TempImg.release();
                    continue;
                }
            }

            if (!TempImg.data)
                TempImg.create(height_, width_, CV_8UC3);
            rgb_data_[0] = TempImg.data;

            if (sws_scale(y2r_sws_context_, yuv_frame_->data, yuv_frame_->linesize, 0,
                          codec_context_->height, rgb_data_, rgb_line_size_) <= 0)
            {
                av_packet_unref(&packet_);
                TempImg.release();
                av_frame_unref(yuv_frame_);
                return false;
            }

            // std::chrono::system_clock::time_point t3 = std::chrono::system_clock::now();
            // printf("decode : %d , switch : %d\n",
            //        std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count(),
            //        std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count());

            if (pts_first)
            {
                bg_pts = packet_.pts;
                pts_first = false;
            }
            // timestamp = yuv_frame_->pts * av_q2d(video_st->time_base);
            // timestamp = (packet_.pts - bg_pts) * av_q2d(video_st->time_base);
            // durationstamp = duration;

            av_packet_unref(&packet_);
            av_frame_unref(yuv_frame_);
            // return true;

            LOG_DEBUG(100) << "SendToAll \n";
            SendToAll(TempImg);
        }
        ++continue_counts;
        av_packet_unref(&packet_);
    }
}
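The core of that loop is the swscale step that converts the decoded frame from the codec's native pixel format into packed BGR24 so it can travel as a cv::Mat. A minimal standalone sketch of just that step, assuming a decoded AVFrame and its AVCodecContext are already available (the helper name and variables are illustrative, not part of the project):

#include <opencv2/core.hpp>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}

// Illustrative helper: convert one decoded frame to a BGR24 cv::Mat.
static bool FrameToBgrMat(AVCodecContext *ctx, AVFrame *frame, cv::Mat &out)
{
    SwsContext *sws = sws_getContext(ctx->width, ctx->height, ctx->pix_fmt,
                                     ctx->width, ctx->height, AV_PIX_FMT_BGR24,
                                     SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws)
        return false;

    out.create(ctx->height, ctx->width, CV_8UC3);            // packed BGR, 3 bytes per pixel
    uint8_t *dst_data[4]     = { out.data, NULL, NULL, NULL };
    int      dst_linesize[4] = { static_cast<int>(out.step), 0, 0, 0 };

    int rows = sws_scale(sws, frame->data, frame->linesize, 0,
                         ctx->height, dst_data, dst_linesize);
    sws_freeContext(sws);
    return rows > 0;
}

Unlike this sketch, which creates and frees the SwsContext on every call, the class caches it in y2r_sws_context_ and reuses it across frames.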
When decoding a file, however, the following error was reported:
Error splitting the input into NAL units
The cause is that avcodec_alloc_context3() leaves the decoder context without the stream's codec parameters, so for MP4-style files the H.264 extradata carrying the SPS/PPS never reaches the decoder and it cannot split the bitstream into NAL units; the live RTSP path got away with this because Annex-B streams carry SPS/PPS in-band. Adding the following code after line 54 of the Open() function, i.e. right before the avcodec_open2() call, makes file decoding work:
// TODO: added to handle files
if (avcodec_parameters_to_context(codec_context_, enc) < 0)
{
    return false;
}
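To see the fix in isolation, here is a hedged, self-contained sketch of the corrected order: find the decoder, allocate the context, copy the stream parameters (the step that was missing), then open the decoder. OpenDecoderForStream() is a hypothetical helper, not part of the project:

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

// Hypothetical helper showing the fixed setup order for one stream.
static AVCodecContext *OpenDecoderForStream(AVStream *stream)
{
    AVCodecParameters *par = stream->codecpar;

    const AVCodec *codec = avcodec_find_decoder(par->codec_id);
    if (!codec)
        return NULL;

    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;

    // Copies width/height, pix_fmt and the extradata (SPS/PPS for H.264 in MP4);
    // skipping this copy is what produced "Error splitting the input into NAL units".
    if (avcodec_parameters_to_context(ctx, par) < 0 ||
        avcodec_open2(ctx, codec, NULL) < 0)
    {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}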