下面两种方式是从 C/C++ 示例代码直接翻译过来的,仍然存在问题,例如指针的使用方式和值的传入方式。考虑到 C# 和 C++ 之间的差异,阅读时应抱着怀疑的态度,这些代码不一定是正确的。
H264视频解码网络流:
using FFmpeg.AutoGen;
using RTForwardServer;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Net.Sockets;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;

/// <summary>
/// H.264 network-stream decoder, adapted from
/// http://blog.csdn.net/jammg/article/details/52750241
/// </summary>
namespace AVParser.Parser
{
    public unsafe class H264Parser
    {
        /// <summary>Control on which decoded frames are displayed.</summary>
        public PictureBox ShowPictureBox { set; get; }

        /// <summary>
        /// Reads a raw H.264 byte stream from <paramref name="socket"/>, decodes it
        /// frame by frame with FFmpeg, converts each frame to RGB24 and shows it in
        /// <see cref="ShowPictureBox"/>. Returns when the peer closes the connection
        /// or decoding fails.
        /// </summary>
        /// <param name="socket">Connected socket delivering the H.264 elementary stream.</param>
        public unsafe void Parser(Socket socket)
        {
            AVCodecContext* pCodecCtx = null;
            AVCodecParserContext* pCodecParserCtx = null;
            AVCodec* pCodec = null;
            AVFrame* pFrame = null;              // decoded frame (YUV)
            AVPacket packet;                     // parsed H.264 packet
            SwsContext* pSwsCtx = null;          // YUV -> RGB24 converter
            IntPtr rgbBufferPtr = IntPtr.Zero;   // single reusable unmanaged RGB buffer
            var dstData = new byte_ptrArray4();  // plane pointers into rgbBufferPtr
            var dstLinesize = new int_array4();  // RGB stride(s)
            int ret;

            pCodec = ffmpeg.avcodec_find_decoder(AVCodecID.AV_CODEC_ID_H264);
            pCodecCtx = ffmpeg.avcodec_alloc_context3(pCodec);
            pCodecParserCtx = ffmpeg.av_parser_init((int)AVCodecID.AV_CODEC_ID_H264);
            if (pCodecParserCtx == null)
            {
                return; // parser unavailable — nothing to clean up yet
            }

            // We feed the decoder partial buffers ("we do not send complete frames"),
            // so allow truncated input when the codec supports it.
            // BUG FIX: the original tested `capabilities > 0 && CODEC_CAP_TRUNCATED > 0`,
            // which is true for any capable codec; a capability flag must be tested
            // with a bitwise AND.
            if ((pCodec->capabilities & ffmpeg.CODEC_CAP_TRUNCATED) != 0)
            {
                pCodecCtx->flags |= ffmpeg.CODEC_FLAG_TRUNCATED;
            }

            ret = ffmpeg.avcodec_open2(pCodecCtx, pCodec, null);
            if (ret < 0)
            {
                ffmpeg.avcodec_free_context(&pCodecCtx);
                ffmpeg.av_parser_close(pCodecParserCtx);
                return;
            }

            pFrame = ffmpeg.av_frame_alloc();
            ffmpeg.av_init_packet(&packet);
            packet.size = 0;
            packet.data = null;

            const int InBufferSize = 4096;
            byte[] inBuffer = new byte[InBufferSize];
            bool isFirstFrame = true;

            try
            {
                while (true)
                {
                    // Blocking receive; 0 bytes means the peer closed the connection.
                    int curSize = socket.Receive(inBuffer, InBufferSize, SocketFlags.None);
                    if (curSize == 0)
                        break;

                    // BUG FIX: the original replaced the received data with a freshly
                    // av_malloc'ed, uninitialized buffer (and leaked it every loop),
                    // so the parser never saw the actual stream. Pin the managed
                    // receive buffer and parse it directly instead.
                    fixed (byte* bufBase = inBuffer)
                    {
                        byte* curPtr = bufBase;
                        while (curSize > 0)
                        {
                            // av_parser_parse2 returns the number of bytes consumed.
                            int len = ffmpeg.av_parser_parse2(
                                pCodecParserCtx, pCodecCtx,
                                &packet.data, &packet.size,
                                curPtr, curSize,
                                ffmpeg.AV_NOPTS_VALUE, ffmpeg.AV_NOPTS_VALUE, ffmpeg.AV_NOPTS_VALUE);
                            curPtr += len;
                            curSize -= len;

                            if (packet.size == 0)
                                continue; // parser needs more data for a full packet

                            int got;
                            ret = ffmpeg.avcodec_decode_video2(pCodecCtx, pFrame, &got, &packet);
                            if (ret < 0)
                                return; // decode error — cleanup happens in finally
                            if (got <= 0)
                                continue; // no complete frame yet

                            if (isFirstFrame)
                            {
                                // Width/height are only known after the first decoded
                                // frame, so the converter and RGB buffer are created here.
                                pSwsCtx = ffmpeg.sws_getContext(
                                    pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                    pCodecCtx->width, pCodecCtx->height, AVPixelFormat.AV_PIX_FMT_RGB24,
                                    ffmpeg.SWS_BICUBIC, null, null, null);

                                int rgbSize = ffmpeg.av_image_get_buffer_size(
                                    AVPixelFormat.AV_PIX_FMT_RGB24,
                                    pCodecCtx->width, pCodecCtx->height, 1);
                                rgbBufferPtr = Marshal.AllocHGlobal(rgbSize);
                                ffmpeg.av_image_fill_arrays(
                                    ref dstData, ref dstLinesize,
                                    (byte*)rgbBufferPtr, AVPixelFormat.AV_PIX_FMT_RGB24,
                                    pCodecCtx->width, pCodecCtx->height, 1);
                                isFirstFrame = false;
                            }

                            // BUG FIX: the original scaled into a separate AVPicture but
                            // built the Bitmap from a different, uninitialized buffer
                            // allocated fresh every frame (and leaked). Scale straight
                            // into the single reusable buffer the Bitmap wraps.
                            ffmpeg.sws_scale(
                                pSwsCtx, pFrame->data, pFrame->linesize,
                                0, pCodecCtx->height, dstData, dstLinesize);

                            // NOTE(review): this assumes Parser runs on the UI thread;
                            // if it runs on a worker thread the assignment needs
                            // Control.Invoke — confirm against the caller.
                            var bitmap = new Bitmap(
                                pCodecCtx->width, pCodecCtx->height,
                                dstLinesize[0], PixelFormat.Format24bppRgb, rgbBufferPtr);

                            // BUG FIX: dispose the previous frame image, not the
                            // PictureBox itself (the original disposed the control
                            // inside the loop, after the very first frame).
                            ShowPictureBox.Image?.Dispose();
                            ShowPictureBox.Image = bitmap;
                        }
                    }
                }
            }
            finally
            {
                // Release everything even on the early-return error paths.
                ShowPictureBox.Image = null;
                ffmpeg.av_free_packet(&packet);
                ffmpeg.av_frame_free(&pFrame);
                ffmpeg.sws_freeContext(pSwsCtx);
                ffmpeg.avcodec_free_context(&pCodecCtx);
                ffmpeg.av_parser_close(pCodecParserCtx);
                if (rgbBufferPtr != IntPtr.Zero)
                    Marshal.FreeHGlobal(rgbBufferPtr);
            }
        }
    }
}
AAC音频解码网络流:
using FFmpeg.AutoGen;
using RTForwardServer;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Media;
using System.Net.Sockets;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;

/// <summary>
/// AAC network-stream decoder, adapted from
/// http://blog.csdn.net/jammg/article/details/52750241
/// </summary>
namespace AVParser.Parser
{
    public unsafe class ACCParser
    {
        /// <summary>
        /// Reads a raw AAC byte stream from <paramref name="socket"/>, decodes it
        /// with FFmpeg, interleaves the planar samples into a PCM byte buffer and
        /// hands each decoded frame to <see cref="SoundPlayer"/>. Returns when the
        /// peer closes the connection or decoding fails.
        /// </summary>
        /// <param name="socket">Connected socket delivering the AAC byte stream.</param>
        public unsafe void Parser(Socket socket)
        {
            AVCodecContext* pCodecCtx = null;
            AVCodecParserContext* pCodecParserCtx = null;
            AVCodec* pCodec = null;
            AVFrame* pFrame = null;      // decoded audio frame (planar)
            AVPacket packet;             // parsed AAC packet
            byte* outBuf = null;         // interleaved PCM output, allocated once
            int outBufSize = 0;
            int ret;

            pCodec = ffmpeg.avcodec_find_decoder(AVCodecID.AV_CODEC_ID_AAC);
            pCodecCtx = ffmpeg.avcodec_alloc_context3(pCodec);
            pCodecParserCtx = ffmpeg.av_parser_init((int)AVCodecID.AV_CODEC_ID_AAC);
            if (pCodecParserCtx == null)
            {
                // BUG FIX: the original called Application.Exit() and then fell
                // through, dereferencing the null parser below. Stop here instead.
                ffmpeg.avcodec_free_context(&pCodecCtx);
                return;
            }

            // We feed the decoder partial buffers ("we do not send complete frames"),
            // so allow truncated input when the codec supports it.
            // BUG FIX: a capability flag must be tested with a bitwise AND, not
            // `capabilities > 0 && CODEC_CAP_TRUNCATED > 0`.
            if ((pCodec->capabilities & ffmpeg.CODEC_CAP_TRUNCATED) != 0)
            {
                pCodecCtx->flags |= ffmpeg.CODEC_FLAG_TRUNCATED;
            }

            ret = ffmpeg.avcodec_open2(pCodecCtx, pCodec, null);
            if (ret < 0)
            {
                ffmpeg.avcodec_free_context(&pCodecCtx);
                ffmpeg.av_parser_close(pCodecParserCtx);
                return;
            }

            pFrame = ffmpeg.av_frame_alloc();
            ffmpeg.av_init_packet(&packet);
            packet.size = 0;
            packet.data = null;

            const int InBufferSize = 4096;
            byte[] inBuffer = new byte[InBufferSize];
            bool isFirstFrame = true;

            try
            {
                while (true)
                {
                    // Blocking receive; 0 bytes means the peer closed the connection.
                    int curSize = socket.Receive(inBuffer, inBuffer.Length, SocketFlags.None);
                    if (curSize == 0)
                        break;

                    // BUG FIX: the original parsed a freshly av_malloc'ed,
                    // uninitialized buffer instead of the received bytes (and leaked
                    // it on every receive). Pin the managed buffer and parse it.
                    fixed (byte* bufBase = inBuffer)
                    {
                        byte* curPtr = bufBase;
                        while (curSize > 0)
                        {
                            // av_parser_parse2 returns the number of bytes consumed.
                            int len = ffmpeg.av_parser_parse2(
                                pCodecParserCtx, pCodecCtx,
                                &packet.data, &packet.size,
                                curPtr, curSize,
                                ffmpeg.AV_NOPTS_VALUE, ffmpeg.AV_NOPTS_VALUE, ffmpeg.AV_NOPTS_VALUE);
                            curPtr += len;
                            curSize -= len;

                            if (packet.size == 0)
                                continue; // parser needs more data for a full packet

                            int got;
                            ret = ffmpeg.avcodec_decode_audio4(pCodecCtx, pFrame, &got, &packet);
                            if (ret < 0)
                                return; // decode error — cleanup happens in finally
                            if (got <= 0)
                                continue; // no complete frame yet

                            if (isFirstFrame)
                            {
                                // Buffer size for one interleaved frame; parameters
                                // are only known after the first decoded frame.
                                outBufSize = ffmpeg.av_samples_get_buffer_size(
                                    null,
                                    pCodecCtx->channels,
                                    pFrame->nb_samples, // samples per channel in this frame
                                    pCodecCtx->sample_fmt,
                                    1);
                                outBuf = (byte*)ffmpeg.av_malloc((ulong)outBufSize);
                                if (outBuf == null)
                                    return;
                                isFirstFrame = false;
                            }

                            // Interleave the two sample planes into outBuf.
                            // NOTE(review): this assumes stereo AV_SAMPLE_FMT_FLTP
                            // (4-byte samples, two planes); extended_data[1] is null
                            // for mono and other layouts need different handling —
                            // confirm the stream format.
                            UInt32* left = (UInt32*)pFrame->extended_data[0];
                            UInt32* right = (UInt32*)pFrame->extended_data[1];
                            for (int i = 0, j = 0; i < outBufSize; i += 8, j++)
                            {
                                outBuf[i] = (byte)(right[j] & 0xff);
                                outBuf[i + 1] = (byte)(right[j] >> 8 & 0xff);
                                outBuf[i + 2] = (byte)(right[j] >> 16 & 0xff);
                                outBuf[i + 3] = (byte)(right[j] >> 24 & 0xff);
                                outBuf[i + 4] = (byte)(left[j] & 0xff);
                                outBuf[i + 5] = (byte)(left[j] >> 8 & 0xff);
                                outBuf[i + 6] = (byte)(left[j] >> 16 & 0xff);
                                outBuf[i + 7] = (byte)(left[j] >> 24 & 0xff);
                            }

                            // Copy the unmanaged PCM into a managed array in one call
                            // instead of the original byte-by-byte pointer loop.
                            byte[] bytes = new byte[outBufSize];
                            Marshal.Copy((IntPtr)outBuf, bytes, 0, outBufSize);

                            // NOTE(review): SoundPlayer expects a complete RIFF/WAV
                            // stream, not headerless PCM, and creating a player per
                            // frame will stutter — this mirrors the original behavior
                            // but likely needs a proper audio output path.
                            MemoryStream ms = new MemoryStream(bytes);
                            SoundPlayer sp = new SoundPlayer(ms);
                            sp.Play();
                        }
                    }
                }
            }
            finally
            {
                // Release everything even on the early-return error paths.
                ffmpeg.av_free_packet(&packet);
                ffmpeg.av_frame_free(&pFrame);
                ffmpeg.avcodec_free_context(&pCodecCtx);
                ffmpeg.av_parser_close(pCodecParserCtx);
                // BUG FIX: the original never freed the av_malloc'ed PCM buffer.
                if (outBuf != null)
                    ffmpeg.av_free(outBuf);
            }
        }
    }
}