The FFmpeg decoding process was briefly summarized in an earlier post; here the goal is to test how to decode in real time with FFmpeg.
Before decoding, we do some preparation: open the camera. During encoding, each encoded frame is pushed onto a queue; dequeued data is handed to the decoder for decoding.
The modules are described in turn below.
1. Opening the camera:
VideoCapture capture(0);
int w = capture.get(CV_CAP_PROP_FRAME_WIDTH);
int h = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
int yuv_bufLen = w * h * 3 / 2; // one I420 frame: Y plane (w*h) plus U and V planes (w*h/4 each)
unsigned char* pYuvBuf = new unsigned char[yuv_bufLen];
cout << "Frame size : " << w << " x " << h << endl;
namedWindow("opencamera", CV_WINDOW_AUTOSIZE);
while (1)
{
    Mat frame;
    capture >> frame;
    imshow("opencamera", frame);
    if (waitKey(30) == 27) break;
}

Capturing from a camera with OpenCV is not covered in detail here; see the linked reference.
2. The encoding process:
DWORD WINAPI x264_encode(LPVOID lparam)
{
    VideoCapture capture(0);
    if (!capture.isOpened())
    {
        cout << "Cannot open the video cam" << endl;
        return -1;
    }
    int w = capture.get(CV_CAP_PROP_FRAME_WIDTH);
    int h = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    result_link_type* result_link = (result_link_type*)lparam;
    int yuv_bufLen = w * h * 3 / 2;
    //int fps = 25;
    size_t yuv_size = w * h * 3 / 2;
    x264_t *encoder;
    x264_picture_t pic_in, pic_out;
    uint8_t *yuv_buffer;
    x264_param_t param;

    //x264_param_default_preset(&param, "veryfast", "zerolatency"); // fill param with the preset's default values
    x264_param_default_preset(&param, "veryfast", "animation");
    //param.i_threads = 1;       // number of frames encoded in parallel
    param.i_width = w;           // width of the video frame
    param.i_height = h;          // height of the video frame
    //param.i_fps_num = fps;     // frame-rate numerator
    //param.i_fps_den = 1;       // frame-rate denominator; fps_num / fps_den = frame rate
    //param.i_keyint_max = 50;   // maximum interval between IDR frames
    //param.b_intra_refresh = 1; // use periodic intra refresh instead of IDR frames
    //param.b_annexb = 1;        // prepend the 0x00000001 start code
    //param.rc.b_mb_tree = 0;    // must be 0 for real-time encoding, otherwise frames are buffered
    //param.b_sliced_threads = 0;
    //x264_param_apply_profile(&param, "baseline"); // restrict to the baseline profile; compare for conflicts with the settings above
    encoder = x264_encoder_open(&param); // open the encoder with the filled-in parameters
#if 1
    x264_picture_alloc(&pic_in, X264_CSP_I420, w, h); // allocate the input picture
    yuv_buffer = (uint8_t*)malloc(yuv_size);          // buffer holding one I420 frame
    pic_in.img.plane[0] = yuv_buffer;                 // point the three planes into yuv_buffer
    pic_in.img.plane[1] = pic_in.img.plane[0] + w * h;
    pic_in.img.plane[2] = pic_in.img.plane[1] + w * h / 4;
    int64_t i_pts = 0;
    x264_nal_t *nals;
    int nnal;
    FILE *fp_out = fopen("test.h264", "wb");
    if (!fp_out)
    {
        printf("Could not open output 264 file\n");
        return -1;
    }
#if 1
    FILE* pFileOut = fopen("test.yuv", "w+");
    if (!pFileOut)
    {
        printf("Could not open input yuv file\n");
        return -1;
    }
#endif
    cout << "Frame size : " << w << " x " << h << endl;
    namedWindow("opencamera", CV_WINDOW_AUTOSIZE);
    Mat frame;
    while (1)
    {
        capture >> frame;            // grab one frame from the camera
        imshow("opencamera", frame); // display it
        //if (waitKey(30) == 27) break;
        waitKey(1);
        cv::Mat yuvImg;
        cv::cvtColor(frame, yuvImg, CV_BGR2YUV_I420); // convert BGR to I420 YUV
        memcpy(yuv_buffer, yuvImg.data, yuv_bufLen*sizeof(unsigned char)); // copy the YUV data into yuv_buffer
        //fwrite(yuv_buffer, yuv_bufLen*sizeof(unsigned char), 1, pFileOut); // dump the raw YUV to disk

        pic_in.i_pts = i_pts++;
        x264_encoder_encode(encoder, &nals, &nnal, &pic_in, &pic_out); // encode one frame

        x264_nal_t *nal;
        int j = 0;
        struct result_node_datatype *result_node = new struct result_node_datatype;
        result_node->result = new unsigned char[800000];
        memset(result_node->result, '\0', 800000);
        result_node->size = 0;
        for (nal = nals; nal < nals + nnal; nal++)
        {
            //fwrite(nal->p_payload, 1, nal->i_payload, fp_out); // save the NAL units to disk
            memcpy(result_node->result + j, nal->p_payload, nal->i_payload); // append this NAL unit
            j = j + nal->i_payload;
        }
        result_node->size = j;
        cout << "result_node->size = " << result_node->size << endl;
        result_push(result_link, result_node);
    }
    x264_encoder_close(encoder); // close the encoder
    //free(yuv_buffer);
    //fclose(pFileOut);
#endif
    return NULL;
}
The x264 encoding process is described in the linked reference.
Note the variable j, initialized to 0. A single call to x264_encoder_encode produces nnal NAL units; each unit's encoded data starts at nal->p_payload and is nal->i_payload bytes long. The purpose of j is to concatenate the individual NAL units into the data of one complete frame: after the loop, j is the total length of that frame. The frame data and its length are then pushed onto the queue. This function runs as a worker thread. The decoder can only decode successfully once it has received a complete frame.
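The post uses result_link_type and result_node_datatype without ever showing their definitions. A minimal reconstruction, consistent with how the encoder thread and the queue functions below use them (the field names come from the code; the exact original definitions are an assumption), might look like this:

// Hypothetical reconstruction -- the original definitions are not shown in the post.
struct result_node_datatype
{
    unsigned char* result;             // one complete encoded frame (concatenated NAL units)
    int size;                          // length of the frame in bytes
    struct result_node_datatype* next; // next node in the singly linked list
};

struct result_link_type
{
    struct result_node_datatype* head; // oldest frame (dequeue end)
    struct result_node_datatype* end;  // newest frame (enqueue end)
    int result_num;                    // number of nodes currently in the queue
};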
3. Queue functions:
void result_push(result_link_type* result_link, result_node_datatype* result_node) // enqueue
{
    result_node->next = NULL; // the node was allocated with new; make sure the tail pointer is clean
    if (result_link->head == NULL)
    {
        result_link->head = result_node;
        result_link->end = result_link->head;
        result_link->result_num++;
    }
    else
    {
        result_link->end->next = result_node;
        result_link->end = result_node;
        result_link->result_num++;
    }
}

struct result_node_datatype* result_pop(result_link_type* result_link) // dequeue
{
    struct result_node_datatype* tmp_node;
    if (result_link->head == NULL)
        return NULL;
    else if (result_link->head == result_link->end)
    {
        // exactly one node left: leave it in place so the consumer never
        // touches the end pointer that the producer thread may be updating
        return NULL;
    }
    else
    {
        tmp_node = result_link->head;
        result_link->head = result_link->head->next;
        result_link->result_num--;
        return tmp_node;
    }
}
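Note that the queue is shared between the encoder thread and the main decoding loop without any lock; it only stays safe because result_pop refuses to remove the last remaining node, so the consumer never races the producer on the end pointer. If you would rather not rely on that subtlety, a mutex-protected wrapper is straightforward. A sketch using std::mutex (C++11; the wrapper names are mine, not from the original code):

#include <mutex>

std::mutex result_mutex; // guards head, end and result_num

void result_push_locked(result_link_type* result_link, result_node_datatype* result_node)
{
    std::lock_guard<std::mutex> lock(result_mutex);
    result_push(result_link, result_node);
}

struct result_node_datatype* result_pop_locked(result_link_type* result_link)
{
    std::lock_guard<std::mutex> lock(result_mutex);
    return result_pop(result_link);
}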
4. The decoding process:
Before the data is handed to the decoder, the 0x00000001 start code has to be prepended to each frame:
bool get_h264_data(uchar* buf, int in_len, uchar* out_buf, int &out_len)
{
    char nalu[4] = { 0x00, 0x00, 0x00, 0x01 };
    memcpy(out_buf, nalu, 4);     // prepend the Annex-B start code
    out_buf += 4;
    memcpy(out_buf, buf, in_len); // copy the frame data; out_buf must hold at least in_len + 4 bytes
    out_len = in_len + 4;
    return true;
}
The decoding code:
int main(int argc, char* argv[])
{
    HANDLE thread1;
    result_link_type *result_link = new result_link_type;
    result_link->head = result_link->end = NULL;
    result_link->result_num = 0;
    thread1 = CreateThread(NULL, 0, x264_encode, (LPVOID)result_link, 0, NULL); // start the encoder thread
    Sleep(1);
#if 1
    Mat pCvMat;
    AVCodec *pCodec;
    AVCodecContext *pCodecCtx = NULL;
    AVCodecParserContext *pCodecParserCtx = NULL;
    AVFrame *pFrame;
    uint8_t *out_buffer;

    const int in_buffer_size = 800000;
    uint8_t in_buffer[in_buffer_size];
    memset(in_buffer, 0, sizeof(in_buffer));
    uint8_t *cur_ptr;
    int cur_size;
    AVPacket packet;
    int ret, got_picture;
    AVCodecID codec_id = AV_CODEC_ID_H264;
    int first_time = 1;
    struct SwsContext *img_convert_ctx;

    //av_log_set_level(AV_LOG_DEBUG);
    avcodec_register_all();

    pCodec = avcodec_find_decoder(codec_id);
    if (!pCodec)
    {
        printf("Codec not found\n");
        return -1;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx)
    {
        printf("Could not allocate video codec context\n");
        return -1;
    }
    pCodecParserCtx = av_parser_init(codec_id);
    if (!pCodecParserCtx)
    {
        printf("Could not allocate video parser context\n");
        return -1;
    }
    if (pCodec->capabilities & CODEC_CAP_TRUNCATED)
        pCodecCtx->flags |= CODEC_FLAG_TRUNCATED; // we do not send complete frames

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        printf("Could not open codec\n");
        return -1;
    }

    pFrame = av_frame_alloc();
    av_init_packet(&packet);
    AVFrame* pFrameBGR = av_frame_alloc(); // holds the decoded frame after conversion to BGR (OpenCV stores images as BGR)
    int size;

    struct result_node_datatype *result_node2 = NULL;
    int out_len;
    while (1)
    {
        result_node2 = result_pop(result_link);
        if (result_node2 == NULL)
        {
            Sleep(1);
            continue;
        }
        get_h264_data(result_node2->result, result_node2->size, in_buffer, out_len);
        cur_size = out_len;
        cout << "cur_size = " << cur_size << endl;
        if (cur_size == 0)
            break;
        cur_ptr = in_buffer;

        while (cur_size > 0)
        {
            int len = av_parser_parse2(
                pCodecParserCtx, pCodecCtx,
                &packet.data, &packet.size,
                cur_ptr, cur_size,
                AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            cur_ptr += len;
            cur_size -= len;
            if (packet.size == 0)
                continue;

            // Some info from AVCodecParserContext
            printf("Packet Size:%6d\t", packet.size);
            switch (pCodecParserCtx->pict_type)
            {
            case AV_PICTURE_TYPE_I: printf("Type: I\t"); break;
            case AV_PICTURE_TYPE_P: printf("Type: P\t"); break;
            case AV_PICTURE_TYPE_B: printf("Type: B\t"); break;
            default:                printf("Type: Other\t"); break;
            }
            printf("Output Number:%4d\t", pCodecParserCtx->output_picture_number);
            printf("Offset:%8lld\n", (long long)pCodecParserCtx->cur_offset);

            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
            if (ret < 0)
            {
                printf("Decode Error.\n");
                return ret;
            }
            if (got_picture)
            {
                if (first_time)
                {
                    printf("\nCodec Full Name:%s\n", pCodecCtx->codec->long_name);
                    printf("width:%d\nheight:%d\n\n", pCodecCtx->width, pCodecCtx->height);
                    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                        pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
                    size = avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
                    out_buffer = (uint8_t *)av_malloc(size);
                    avpicture_fill((AVPicture *)pFrameBGR, out_buffer, AV_PIX_FMT_BGR24,
                        pCodecCtx->width, pCodecCtx->height); // attach the BGR buffer to pFrameBGR
                    cout << "pCodecCtx->width = " << pCodecCtx->width << "\npCodecCtx->height = " << pCodecCtx->height << endl;
                    pCvMat.create(cv::Size(pCodecCtx->width, pCodecCtx->height), CV_8UC3);
                    first_time = 0;
                }
                printf("Succeed to decode 1 frame!\n");
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                    0, pCodecCtx->height, pFrameBGR->data, pFrameBGR->linesize); // YUV -> BGR
                cout << "size = " << size << endl;
                memcpy(pCvMat.data, out_buffer, size);
                imshow("RGB", pCvMat);
                waitKey(1);
            }
        }
    }
    system("pause");

    // Flush the decoder
    packet.data = NULL;
    packet.size = 0;

    sws_freeContext(img_convert_ctx);
    av_parser_close(pCodecParserCtx);
    av_frame_free(&pFrameBGR);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    av_free(pCodecCtx);
#endif
    return 0;
}
For the decoding flow and the meaning of the basic parameters and functions, see the linked reference.
When a frame is decoded successfully, got_picture is nonzero; inside that branch the decoded YUV data is converted to BGR and displayed with OpenCV.
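Note that avcodec_decode_video2 is deprecated in newer FFmpeg releases (3.1 and later). If you build against a recent FFmpeg, the decode step is done with the send/receive API instead. A minimal sketch of just that step, keeping the variable names used above:

// Sketch for FFmpeg >= 3.1: replaces the avcodec_decode_video2() call.
ret = avcodec_send_packet(pCodecCtx, &packet);
if (ret < 0)
{
    printf("Error sending a packet to the decoder\n");
    return ret;
}
while (ret >= 0)
{
    ret = avcodec_receive_frame(pCodecCtx, pFrame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;      // the decoder needs more input, or has been fully flushed
    else if (ret < 0)
        return ret; // a real decoding error
    // pFrame now holds one decoded picture; convert it with sws_scale() as above
}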
Screenshot of the program running: (image not reproduced here)
While the program runs, comparing the decoded window against the camera preview above shows that the decoded image lags by a few seconds. This is because the encoder was opened with the animation tune, which buffers several frames before encoding starts; buffering improves inter-frame prediction and compression efficiency. With zerolatency there is essentially no delay, but compression is worse.
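If low latency matters more than compression, the encoder setup in the thread function can be switched to the zerolatency tune. A minimal sketch of the change, using only options that already appear (commented out) in the encoder code above:

// Low-latency encoder setup: the encoder emits output without buffering frames.
x264_param_default_preset(&param, "veryfast", "zerolatency");
param.rc.b_mb_tree = 0;   // macroblock-tree lookahead buffers frames; must be 0 for real time
param.i_width = w;
param.i_height = h;
x264_param_apply_profile(&param, "baseline"); // the baseline profile has no B-frames
encoder = x264_encoder_open(&param);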
Download link for the complete test project: (link)