A Lightweight RTSP Client Based on JRTPLIB (myRTSPClient), Implementation Part 5: Extracting Media Stream Data at the User Interface Layer

Posted: 2020-11-28 17:14:19

After the RTSP client sends the PLAY command, the RTSP server starts sending RTP media packets on separate UDP ports (the ports negotiated in the SDP). These packets arrive at the client one after another, spaced milliseconds apart, and from that point on the client can call interfaces such as GetMediaData to retrieve the media stream data.

1. uint8_t * RtspClient::GetMediaData(string media_type, uint8_t * buf, size_t * size, size_t max_size)

This function retrieves media stream data: it copies the data into buf and stores the data length in size. media_type can be the string "audio" or "video", and max_size is the capacity of buf.

if(it->second.MediaType == "video") return GetVideoData(&(it->second), buf, size, max_size);
if(it->second.MediaType == "audio") return GetAudioData(&(it->second), buf, size, max_size);

The function first looks up the media session matching media_type in MediaSessionMap, then dispatches to GetVideoData or GetAudioData accordingly.

uint8_t * RtspClient::GetMediaData(string media_type, uint8_t * buf, size_t * size, size_t max_size)
{
    MyRegex Regex;
    map<string, MediaSession>::iterator it;
    bool IgnoreCase = true;

    if(!buf) return NULL;
    if(!size) return NULL;

    *size = 0;

    // Find the media session whose key matches media_type (case-insensitive)
    for(it = MediaSessionMap->begin(); it != MediaSessionMap->end(); it++) {
        if(Regex.Regex(it->first.c_str(), media_type.c_str(), IgnoreCase)) break;
    }

    if(it == MediaSessionMap->end()) {
        fprintf(stderr, "%s: No such media session\n", __func__);
        return NULL;
    }

    // Dispatch according to the media type of the matched session
    if(it->second.MediaType == "video") return GetVideoData(&(it->second), buf, size, max_size);
    if(it->second.MediaType == "audio") return GetAudioData(&(it->second), buf, size, max_size);

    return NULL;
}
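For reference, here is a minimal caller-side sketch. It assumes the RtspClient object has already completed DESCRIBE/SETUP/PLAY as covered in the earlier posts of this series; the header name, buffer size and output file name are assumptions chosen purely for illustration.

#include <cstdio>
#include <cstdint>
#include <cstddef>
// #include "rtspClient.h"   // header name as found in the myRTSPClient source tree (assumption)

void DumpVideo(RtspClient & Client)
{
    const size_t BUFFER_SIZE = 1024 * 1024;   // arbitrary, large enough for one assembled NALU
    uint8_t * buf = new uint8_t[BUFFER_SIZE];
    size_t size = 0;
    FILE * fp = fopen("dump.h264", "wb");

    for(int i = 0; i < 5000; i++) {
        // A successful call returns one complete NALU with its start code
        // (or a VPS/SPS/PPS group, see section 2 below).
        // NULL can mean "no RTP data yet" as well as a hard error; a real
        // application would distinguish the two and pace its polling.
        if(Client.GetMediaData("video", buf, &size, BUFFER_SIZE) && size > 0) {
            fwrite(buf, 1, size, fp);
        }
    }

    fclose(fp);
    delete [] buf;
}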

2. uint8_t * RtspClient::GetVideoData(MediaSession * media_session, uint8_t * buf, size_t * size, size_t max_size, bool get_vps_sps_pps_periodly)

if(true == get_vps_sps_pps_periodly) {
    if(GetVideoDataCount >= GetSPS_PPS_Period) {
        GetVideoDataCount = 0;

        ......

}

The parameter get_vps_sps_pps_periodly indicates whether the VPS, SPS and PPS (the parameter sets defined by H.264/H.265; the VPS exists only in H.265) should be written into the output periodically; it defaults to true. The point of the periodic insertion is to guard against these key parameters being lost at the very start of the video transmission. It costs a little extra bandwidth, but that is negligible compared with the video data itself, and it prevents problems such as corrupted (garbled) frames caused by missing parameter sets.
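On the consumer side it can be useful to recognize these parameter-set groups in the returned buffer. The sketch below is illustrative only and is not part of myRTSPClient; it assumes the 4-byte Annex-B start code (00 00 00 01) that the copy routines put in front of each NALU (see NALU_StartCodeSize in the full listing below), and it uses the nal_unit_type values defined by the standards (H.264: SPS = 7, PPS = 8; H.265: VPS = 32, SPS = 33, PPS = 34).

#include <cstdint>
#include <cstddef>

// Illustrative helper, not part of myRTSPClient: report whether the buffer
// returned by GetVideoData starts with a parameter-set NALU.
// Assumes the 4-byte Annex-B start code 00 00 00 01 in front of the NALU.
bool StartsWithParameterSet(const uint8_t * buf, size_t size, bool is_h265)
{
    const size_t start_code = 4;
    if(size <= start_code) return false;

    uint8_t header = buf[start_code];
    if(is_h265) {
        uint8_t type = (header >> 1) & 0x3F;           // H.265 nal_unit_type
        return type == 32 || type == 33 || type == 34; // VPS / SPS / PPS
    } else {
        uint8_t type = header & 0x1F;                  // H.264 nal_unit_type
        return type == 7 || type == 8;                 // SPS / PPS
    }
}

With the default of true, one call out of every GET_SPS_PPS_PERIOD returns such a parameter-set group instead of a frame NALU.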

if(media_session->EncodeType == "H264") {
    NALUTypeBaseTmp = &NaluBaseType_H264Obj;
} else if (media_session->EncodeType == "H265") {
    NALUTypeBaseTmp = &NaluBaseType_H265Obj;
} else {
    // Unknown Nalu type
    printf("Unsupported codec type: %s\n", media_session->EncodeType.c_str());
    return NULL;
}

Next, based on the encoding type recorded in media_session, the function picks the corresponding object from the RTP audio/video parsing layer (NaluBaseType_H264Obj or NaluBaseType_H265Obj) to handle the video data.
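For completeness, EncodeType is not decided here: it comes from the SDP media description (the a=rtpmap line, for example "a=rtpmap:96 H264/90000") obtained in the DESCRIBE reply. Purely as an illustration of where the "H264"/"H265" strings originate, a standalone extraction could look like the sketch below; the helper name is hypothetical, and the real parsing lives in the SDP handling code, not in GetVideoData.

#include <string>

// Hypothetical helper, for illustration only: extract the encoding name
// from an SDP rtpmap attribute such as "a=rtpmap:96 H264/90000".
std::string EncodeTypeFromRtpmap(const std::string & rtpmap)
{
    size_t space = rtpmap.find(' ');
    if(space == std::string::npos) return "";
    size_t slash = rtpmap.find('/', space + 1);
    if(slash == std::string::npos) return "";
    return rtpmap.substr(space + 1, slash - space - 1);  // e.g. "H264"
}

// EncodeTypeFromRtpmap("a=rtpmap:96 H264/90000") -> "H264"
// EncodeTypeFromRtpmap("a=rtpmap:97 H265/90000") -> "H265"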

do {
    EndFlag = true;

    ......

    EndFlag = NALUType->GetEndFlag();
} while(!EndFlag);

Then comes the loop that collects the NALU of a video frame. Because a NALU is often larger than one MTU (maximum transmission unit), it gets split across several RTP packets, and the last fragment carries an end flag. Once the final fragment of the NALU has been received, the loop exits and the function returns.
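As background, this end flag comes from the RTP payload format rather than from RTP itself. For H.264 in packetization mode 1 (RFC 6184), a fragmented NALU is carried in FU-A packets, and the second payload byte (the FU header) carries a start bit and an end bit; H.265 uses an analogous fragmentation unit. The sketch below only illustrates where those bits sit; in myRTSPClient this logic is encapsulated in the NALU classes of the RTP parsing layer.

#include <cstdint>

// Layout of an H.264 FU-A fragmentation unit (RFC 6184):
//   byte 0: FU indicator -> F(1) | NRI(2) | type(5), type == 28 for FU-A
//   byte 1: FU header    -> S(1) | E(1) | R(1) | original nal_unit_type(5)
bool IsFuA(const uint8_t * payload)         { return (payload[0] & 0x1F) == 28; }
bool FuStart(const uint8_t * payload)       { return (payload[1] & 0x80) != 0; } // first fragment
bool FuEnd(const uint8_t * payload)         { return (payload[1] & 0x40) != 0; } // last fragment
uint8_t FuNaluType(const uint8_t * payload) { return payload[1] & 0x1F; }        // type of the reassembled NALU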

uint8_t * RtspClient::GetVideoData(MediaSession * media_session, uint8_t * buf, size_t * size, size_t max_size, bool get_vps_sps_pps_periodly)
{
    if(!media_session || !buf || !size) return NULL;

    *size = 0;

    const size_t GetSPS_PPS_Period = GET_SPS_PPS_PERIOD; // 30 times
    if(true == get_vps_sps_pps_periodly) {
        if(GetVideoDataCount >= GetSPS_PPS_Period) {
            // Every GET_SPS_PPS_PERIOD calls, return the parameter sets
            // (VPS/SPS/PPS) instead of a frame NALU.
            GetVideoDataCount = 0;

            const size_t NALU_StartCodeSize = 4; // Annex-B start code 00 00 00 01
            size_t SizeTmp = 0;

            if(!GetVPSNalu(buf + (*size), &SizeTmp) || SizeTmp <= NALU_StartCodeSize) {
                // H.264 has no VPS, so a missing VPS is not reported
                // fprintf(stderr, "\033[31mWARNING: No H264 VPS\033[0m\n");
            } else {
                *size += SizeTmp;
            }
            if(!GetSPSNalu(buf + (*size), &SizeTmp) || SizeTmp <= NALU_StartCodeSize) {
                fprintf(stderr, "\033[31mWARNING: No SPS\033[0m\n");
            } else {
                *size += SizeTmp;
            }
            if(!GetPPSNalu(buf + (*size), &SizeTmp) || SizeTmp <= NALU_StartCodeSize) {
                fprintf(stderr, "\033[31mWARNING: No PPS\033[0m\n");
            } else {
                *size += SizeTmp;
            }
            return buf;
        } else {
            GetVideoDataCount++;
        }
    }

    size_t SizeTmp = 0;
    bool EndFlag = false;
    NALUTypeBase * NALUTypeBaseTmp = NULL;
    NALUTypeBase * NALUType;

    int PM = media_session->Packetization;
    if(!IS_PACKET_MODE_VALID(PM)) {
        cerr << "WARNING: Invalid Packetization Mode" << endl;
        return NULL;
    }

    if(media_session->EncodeType == "H264") {
        NALUTypeBaseTmp = &NaluBaseType_H264Obj;
    } else if (media_session->EncodeType == "H265") {
        NALUTypeBaseTmp = &NaluBaseType_H265Obj;
    } else {
        // Unknown Nalu type
        printf("Unsupported codec type: %s\n", media_session->EncodeType.c_str());
        return NULL;
    }

    // Collect RTP packets until the last fragment of the NALU is seen
    do {
        EndFlag = true;
        if(!media_session->GetMediaData(VideoBuffer.Buf, &SizeTmp)) return NULL;
        if(0 == SizeTmp) {
            cerr << "No RTP data" << endl;
            return NULL;
        }

        int NT;
        NT = NALUTypeBaseTmp->ParseNALUHeader_Type(VideoBuffer.Buf);
        NALUType = NALUTypeBaseTmp->GetNaluRtpType(PM, NT);
        if(NULL == NALUType) {
            printf("Unknown NALU Type: %s\n", media_session->EncodeType.c_str());
            return NULL;
        }

        if(SizeTmp > VideoBuffer.Size) {
            cerr << "Error: RTP Packet too large(" << SizeTmp << " bytes > " << VideoBuffer.Size << " bytes)" << endl;
            return NULL;
        }

        if(*size + SizeTmp > max_size) {
            fprintf(stderr, "\033[31mWARNING: NALU truncated because larger than buffer: %zu(NALU size) > %zu(Buffer size)\033[0m\n", *size + SizeTmp, max_size);
            return buf;
        }

        SizeTmp = NALUType->CopyData(buf + (*size), VideoBuffer.Buf, SizeTmp);
        *size += SizeTmp;
        EndFlag = NALUType->GetEndFlag();
    } while(!EndFlag);

    return buf;
}

3. uint8_t * RtspClient::GetAudioData(MediaSession * media_session, uint8_t * buf, size_t * size, size_t max_size)

This function follows the same idea as GetVideoData: it first obtains the corresponding object from the RTP audio/video parsing layer (MPEG_AudioObj), and then fetches the audio stream data.

uint8_t * RtspClient::GetAudioData(MediaSession * media_session, uint8_t * buf, size_t * size, size_t max_size)
{
    if(!media_session || !buf || !size) return NULL;

    *size = 0;
    size_t SizeTmp = 0;
    MPEGTypeBase * MPEGType;

    if(!media_session->GetMediaData(AudioBuffer.Buf, &SizeTmp)) return NULL;
    if(0 == SizeTmp) {
        cerr << "No RTP data" << endl;
        return NULL;
    }

    MPEGType = &MPEG_AudioObj;

    if(SizeTmp > AudioBuffer.Size) {
        cerr << "Error: RTP Packet too large(" << SizeTmp << " bytes > " << AudioBuffer.Size << " bytes)" << endl;
        return NULL;
    }
    if(*size + SizeTmp > max_size) {
        fprintf(stderr, "\033[31mWARNING: NALU truncated because larger than buffer: %zu(NALU size) > %zu(Buffer size)\033[0m\n", *size + SizeTmp, max_size);
        return buf;
    }

    SizeTmp = MPEGType->CopyData(buf + (*size), AudioBuffer.Buf, SizeTmp);
    *size += SizeTmp;

    return buf;
}
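A caller-side sketch for the audio path mirrors the video example after section 1. Note that GetAudioData performs a single GetMediaData call, so each successful call returns the payload of one RTP packet. The file name and buffer size below are arbitrary choices for illustration.

// Minimal audio sketch (same assumptions as the video example: the client
// has already been set up and PLAY has been sent).
void DumpAudio(RtspClient & Client)
{
    const size_t BUFFER_SIZE = 64 * 1024;   // arbitrary, plenty for one RTP payload
    uint8_t * buf = new uint8_t[BUFFER_SIZE];
    size_t size = 0;
    FILE * fp = fopen("dump_audio.raw", "wb");

    for(int i = 0; i < 5000; i++) {
        // Each successful call yields one RTP packet's worth of audio payload
        if(Client.GetMediaData("audio", buf, &size, BUFFER_SIZE) && size > 0) {
            fwrite(buf, 1, size, fp);
        }
    }

    fclose(fp);
    delete [] buf;
}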
