As the title says: this is a project I wrote for someone else. The main flow is receiving RTSP data through live555.
I wrapped it in a C++ interface that accepts a number of URLs and, every 60 s, outputs the H.264 data for those URLs. I wrote it by following testRTSPClient; since that file is quite long, I split it into several files.
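In other words, the whole interface boils down to a single function (its full definition is in maincall.h / maincall.cpp near the end):
vector<string> maincall(vector<string> urls); // in: one RTSP URL per entry; out: one buffer of Annex-B H.264 bytes per entry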
Without further ado, here's the code.
This is the header file, taken from testRTSPClient, with my own classes added on top.
call.h
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"
#include <iostream>
using namespace std;
#include <vector>
#include <string>
#include <map>
// Forward function definitions:
// RTSP 'response handlers':
void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString);
void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString);
void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString);
// Other event handler functions:
void subsessionAfterPlaying(void* clientData); // called when a stream's subsession (e.g., audio or video substream) ends
void subsessionByeHandler(void* clientData); // called when a RTCP "BYE" is received for a subsession
void streamTimerHandler(void* clientData);
// called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE")
// The main streaming routine (for each "rtsp://" URL):
void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL);
// Used to iterate through each stream's 'subsessions', setting up each one:
void setupNextSubsession(RTSPClient* rtspClient);
// Used to shut down and close a stream (including its "RTSPClient" object):
void shutdownStream(RTSPClient* rtspClient, int exitCode = 1);
UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient);
UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession);
// Define a class to hold per-stream state that we maintain throughout each stream's lifetime:
class StreamClientState {
public:
StreamClientState();
virtual ~StreamClientState();
public:
MediaSubsessionIterator* iter;
MediaSession* session;
MediaSubsession* subsession;
TaskToken streamTimerTask;
double duration;
};
// If you're streaming just a single stream (i.e., just from a single URL, once), then you can define and use just a single
// "StreamClientState" structure, as a global variable in your application. However, because - in this demo application - we're
// showing how to play multiple streams, concurrently, we can't do that. Instead, we have to have a separate "StreamClientState"
// structure for each "RTSPClient". To do this, we subclass "RTSPClient", and add a "StreamClientState" field to the subclass:
class ourRTSPClient: public RTSPClient {
public:
static ourRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL,
int verbosityLevel = 0,
char const* applicationName = NULL,
portNumBits tunnelOverHTTPPortNum = 0);
protected:
ourRTSPClient(UsageEnvironment& env, char const* rtspURL,
int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum);
// called only by createNew();
virtual ~ourRTSPClient();
public:
StreamClientState scs;
};
// Define a data sink (a subclass of "MediaSink") to receive the data for each subsession (i.e., each audio or video 'substream').
// In practice, this might be a class (or a chain of classes) that decodes and then renders the incoming audio or video.
// Or it might be a "FileSink", for outputting the received data into a file (as is done by the "openRTSP" application).
// In this example code, however, we define a simple 'dummy' sink that receives incoming data, but does nothing with it.
class DummySink: public MediaSink {
public:
static DummySink* createNew(UsageEnvironment& env,
MediaSubsession& subsession, // identifies the kind of data that's being received
char const* streamId = NULL); // identifies the stream itself (optional)
private:
DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId);
// called only by "createNew()"
virtual ~DummySink();
static void afterGettingFrame(void* clientData, unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds);
void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime, unsigned durationInMicroseconds);
private:
// redefined virtual functions:
virtual Boolean continuePlaying();
private:
u_int8_t* fReceiveBuffer;
MediaSubsession& fSubsession;
char* fStreamId;
//////////////////////////////////////////////////////////////////////////
// my code
private: //H264
u_int8_t* fReceiveBufferadd4; // fReceiveBuffer with a 4-byte Annex-B start code in front
u_int8_t const* sps; // note: these four members are currently unused;
unsigned spsSize; // SPS/PPS handling goes through setSprop() instead
u_int8_t const* pps;
unsigned ppsSize;
public:
void setSprop(u_int8_t const* prop, unsigned size);
// my code end
//////////////////////////////////////////////////////////////////////////
};
//////////////////////////////////////////////////////////////////////////
// my code
// A small helper that runs the live555 event loop one step at a time, so the
// loop can be stopped from shutdownStream() via the global "isend" flag:
class zjk
{
public:
zjk();
void doEventLoopzjk(BasicTaskScheduler0* Basicscheduler);
};
// my code
//////////////////////////////////////////////////////////////////////////
#endif // CALL_H
This is the implementation of the classes.
class.cpp
#include "call.h"
#include <cstdio> // snprintf
#include <cstring> // memcpy, strlen
//////////////////////////////////////////////////////////////////////////
// my variables (defined in maincall.cpp)
extern vector<string> data; // one buffer of Annex-B H.264 bytes per URL
extern map<string, int> inds; // maps each input URL to its index in "data"
extern int nowind;
extern string nowstr;
extern int duration; // capture length in seconds (60 by default)
extern bool isend; // event-loop watch flag; cleared when the last stream ends
//
//////////////////////////////////////////////////////////////////////////
// Implementation of "ourRTSPClient":
ourRTSPClient* ourRTSPClient::createNew(UsageEnvironment& env, char const* rtspURL,
int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum) {
return new ourRTSPClient(env, rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum);
}
ourRTSPClient::ourRTSPClient(UsageEnvironment& env, char const* rtspURL,
int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum)
: RTSPClient(env,rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum, -1) {
}
ourRTSPClient::~ourRTSPClient() {
}
// Implementation of "StreamClientState":
StreamClientState::StreamClientState()
: iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0) {
}
StreamClientState::~StreamClientState() {
delete iter;
if (session != NULL) {
// We also need to delete "session", and unschedule "streamTimerTask" (if set)
UsageEnvironment& env = session->envir(); // alias
env.taskScheduler().unscheduleDelayedTask(streamTimerTask);
Medium::close(session);
}
}
// Implementation of "DummySink":
// Even though we're not going to be doing anything with the incoming data, we still need to receive it.
// Define the size of the buffer that we'll use. If a NAL unit is ever larger
// than this, "numTruncatedBytes" in afterGettingFrame() will be non-zero; make
// the buffer bigger if you see that with your streams.
#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 100000
DummySink* DummySink::createNew(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId) {
return new DummySink(env, subsession, streamId);
}
DummySink::DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
: MediaSink(env),
fSubsession(subsession) {
fStreamId = strDup(streamId);
fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
//////////////////////////////////////////////////////////////////////////
// my code: a second buffer whose first 4 bytes permanently hold the Annex-B
// start code (00 00 00 01); each received NAL unit is copied in after it.
fReceiveBufferadd4 = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE+4];
fReceiveBufferadd4[0] = 0;
fReceiveBufferadd4[1] = 0;
fReceiveBufferadd4[2] = 0;
fReceiveBufferadd4[3] = 1;
// my code
//////////////////////////////////////////////////////////////////////////
}
DummySink::~DummySink() {
delete[] fReceiveBuffer;
delete[] fStreamId;
delete [] fReceiveBufferadd4;
}
void DummySink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime, unsigned durationInMicroseconds) {
DummySink* sink = (DummySink*)clientData;
sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}
// If you don't want to see debugging output for each received frame, then comment out the following line:
// #define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
//////////////////////////////////////////////////////////////////////////
// my code
void DummySink::setSprop(u_int8_t const* prop, unsigned size)
{
// Prepend the 4-byte Annex-B start code to an out-of-band SPS/PPS NAL unit
// and append the result to the current stream's output buffer:
u_int8_t* buf = new u_int8_t[size + 4];
buf[0] = 0;
buf[1] = 0;
buf[2] = 0;
buf[3] = 1;
memcpy(buf + 4, prop, size);
nowstr = string((char const*)buf, size + 4);
data[nowind] = data[nowind] + nowstr;
delete[] buf;
// envir() << "after setSprop\n";
}
// my code end
//////////////////////////////////////////////////////////////////////////
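Some context on why the start code matters: live555 delivers each H.264 NAL unit without its Annex-B start code, and the SPS/PPS usually arrive only out-of-band in the SDP, so anything that writes or decodes this data needs the 00 00 00 01 framing added back. A minimal standalone sketch of that framing step (the helper name is mine, not part of the project):
#include <string>
// Hypothetical helper: wrap one NAL unit in Annex-B framing and append it to
// an output buffer -- the same step setSprop() and afterGettingFrame() perform.
static void appendNalAnnexB(std::string& out, unsigned char const* nal, unsigned size)
{
static const char startCode[4] = {0, 0, 0, 1};
out.append(startCode, 4); // 00 00 00 01
out.append((char const*)nal, size); // the NAL unit itself, unmodified
}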
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
// We've just received a frame of data. (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
}
#ifdef DEBUG_PRINT_NPT
envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
#endif
envir() << "\n";
#endif
//////////////////////////////////////////////////////////////////////////
// my code
if (!strcmp("video", fSubsession.mediumName()) &&
!strcmp("H264", fSubsession.codecName()))
{
if (frameSize != 0)
{
// Prepend the Annex-B start code (already sitting in fReceiveBufferadd4[0..3])
// and append the framed NAL unit to this stream's output buffer:
memcpy(fReceiveBufferadd4 + 4, fReceiveBuffer, frameSize);
nowstr = string((char const*)fReceiveBufferadd4, frameSize + 4);
// live555 reports the stream id as "rtsp://.../x.mkv/"; strip the trailing
// '/' so it matches the key that maincall() stored in "inds":
char name[256];
snprintf(name, sizeof(name), "%s", fStreamId);
size_t strl = strlen(name);
if (strl > 0) name[strl-1] = '\0';
nowind = inds[name];
data[nowind] = data[nowind] + nowstr;
// fSubsession.videoWidth()/videoHeight() are also available here if you
// need the frame dimensions.
}
}
// my code end
//////////////////////////////////////////////////////////////////////////
// Then continue, to request the next frame of data:
continuePlaying();
}
Boolean DummySink::continuePlaying() {
if (fSource == NULL) return False; // sanity check (should not happen)
// Request the next frame of data from our input source. "afterGettingFrame()" will get called later, when it arrives:
fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
afterGettingFrame, this,
onSourceClosure, this);
return True;
}
//////////////////////////////////////////////////////////////////////////
zjk::zjk()
{
}
void zjk::doEventLoopzjk(BasicTaskScheduler0* Basicscheduler)
{ // Repeatedly loop, handling readable sockets and timed events,
// until shutdownStream() clears "isend" once the last stream has ended:
while (isend) {
//printf("zjk\n");
Basicscheduler->SingleStep();
// add any per-iteration work of your own here
}
}
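By the way, this hand-rolled SingleStep() loop does the same job as live555's own doEventLoop() with a watch variable (the call that is commented out in maincall.cpp below). If you don't need per-iteration hooks, the standard pattern is roughly:
char volatile eventLoopWatchVariable = 0;
// ... create the scheduler/environment and call openURL() as in maincall(), then:
env->taskScheduler().doEventLoop(&eventLoopWatchVariable);
// doEventLoop() returns once something sets the variable non-zero -- e.g. in
// shutdownStream(), when the last client closes: eventLoopWatchVariable = 1;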
This is the implementation of the functions.
method.cpp
#include "call.h"
//////////////////////////////////////////////////////////////////////////
// my variables (defined in maincall.cpp)
extern vector<string> data;
extern map<string, int> inds;
extern int nowind;
extern string nowstr;
extern int duration;
extern bool isend;
//
//////////////////////////////////////////////////////////////////////////
#define RTSP_CLIENT_VERBOSITY_LEVEL 1 // by default, print verbose output from each "RTSPClient"
static unsigned rtspClientCount = 0; // Counts how many streams (i.e., "RTSPClient"s) are currently in use.
void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) {
// Begin by creating a "RTSPClient" object. Note that there is a separate "RTSPClient" object for each stream that we wish
// to receive (even if more than one stream uses the same "rtsp://" URL).
RTSPClient* rtspClient = ourRTSPClient::createNew(env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName);
if (rtspClient == NULL) {
env << "Failed to create a RTSP client for URL \"" << rtspURL << "\": " << env.getResultMsg() << "\n";
return;
}
++rtspClientCount;
// Next, send a RTSP "DESCRIBE" command, to get a SDP description for the stream.
// Note that this command - like all RTSP commands - is sent asynchronously; we do not block, waiting for a response.
// Instead, the following function call returns immediately, and we handle the RTSP response later, from within the event loop:
rtspClient->sendDescribeCommand(continueAfterDESCRIBE);
}
// Implementation of the RTSP 'response handlers':
void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString) {
do {
UsageEnvironment& env = rtspClient->envir(); // alias
StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
if (resultCode != 0) {
env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n";
delete[] resultString;
break;
}
char* const sdpDescription = resultString;
env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n";
// Create a media session object from this SDP description:
scs.session = MediaSession::createNew(env, sdpDescription);
delete[] sdpDescription; // because we don't need it anymore
if (scs.session == NULL) {
env << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg() << "\n";
break;
} else if (!scs.session->hasSubsessions()) {
env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
break;
}
// Then, create and set up our data source objects for the session. We do this by iterating over the session's 'subsessions',
// calling "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, on each one.
// (Each 'subsession' will have its own data source.)
scs.iter = new MediaSubsessionIterator(*scs.session);
setupNextSubsession(rtspClient);
return;
} while (0);
// An unrecoverable error occurred with this stream.
shutdownStream(rtspClient);
}
// By default, we request that the server stream its data using RTP/UDP.
// If, instead, you want to request that the server stream via RTP-over-TCP, change the following to True:
#define REQUEST_STREAMING_OVER_TCP False
void setupNextSubsession(RTSPClient* rtspClient) {
UsageEnvironment& env = rtspClient->envir(); // alias
StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
scs.subsession = scs.iter->next();
if (scs.subsession != NULL) {
if (!scs.subsession->initiate()) {
env << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n";
setupNextSubsession(rtspClient); // give up on this subsession; go to the next one
} else {
env << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (";
if (scs.subsession->rtcpIsMuxed()) {
env << "client port " << scs.subsession->clientPortNum();
} else {
env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
}
env << ")\n";
// Continue setting up this subsession, by sending a RTSP "SETUP" command:
rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, REQUEST_STREAMING_OVER_TCP);
}
return;
}
// We've finished setting up all of the subsessions. Now, send a RTSP "PLAY" command to start the streaming:
if (scs.session->absStartTime() != NULL) {
// Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(), scs.session->absEndTime());
} else {
scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
}
}
void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
do {
UsageEnvironment& env = rtspClient->envir(); // alias
StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
if (resultCode != 0) {
env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString << "\n";
break;
}
env << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (";
if (scs.subsession->rtcpIsMuxed()) {
env << "client port " << scs.subsession->clientPortNum();
} else {
env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
}
env << ")\n";
//////////////////////////////////////////////////////////////////////////
// mycode
const char *sprop = scs.subsession->fmtp_spropparametersets();
u_int8_t const* sps = NULL;
unsigned spsSize = 0;
u_int8_t const* pps = NULL;
unsigned ppsSize = 0;
unsigned numSPropRecords = 0;
SPropRecord* sPropRecords = NULL; // kept in scope so it can be freed after use below
if (sprop != NULL) {
sPropRecords = parseSPropParameterSets(sprop, numSPropRecords);
for (unsigned i = 0; i < numSPropRecords; ++i) {
if (sPropRecords[i].sPropLength == 0) continue; // bad data
u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0])&0x1F;
if (nal_unit_type == 7/*SPS*/) {
sps = sPropRecords[i].sPropBytes;
spsSize = sPropRecords[i].sPropLength;
} else if (nal_unit_type == 8/*PPS*/) {
pps = sPropRecords[i].sPropBytes;
ppsSize = sPropRecords[i].sPropLength;
}
}
}
// mycode end
//////////////////////////////////////////////////////////////////////////
// Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
// (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
// after we've sent a RTSP "PLAY" command.)
scs.subsession->sink = DummySink::createNew(env, *scs.subsession, rtspClient->url());
// perhaps use your own custom "MediaSink" subclass instead
if (scs.subsession->sink == NULL) {
env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
<< "\" subsession: " << env.getResultMsg() << "\n";
break;
}
env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n";
scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession
//////////////////////////////////////////////////////////////////////////
// mycode
// live555 reports the URL as "rtsp://.../x.mkv/"; strip the trailing '/' so it
// matches the key that maincall() stored in "inds":
char name[256];
snprintf(name, sizeof(name), "%s", rtspClient->url());
size_t strl = strlen(name);
if (strl > 0) name[strl-1] = '\0';
nowind = inds[name];
if (sps != NULL) {
((DummySink *)scs.subsession->sink)->setSprop(sps, spsSize);
}
if (pps != NULL) {
((DummySink *)scs.subsession->sink)->setSprop(pps, ppsSize);
}
delete[] sPropRecords; // setSprop() copied the bytes, so the records can be freed now
// my code end
//////////////////////////////////////////////////////////////////////////
scs.subsession->sink->startPlaying(*(scs.subsession->readSource()),
subsessionAfterPlaying, scs.subsession);
// Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
if (scs.subsession->rtcpInstance() != NULL) {
scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession);
}
} while (0);
delete[] resultString;
// Set up the next subsession, if any:
setupNextSubsession(rtspClient);
}
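For reference, sprop-parameter-sets in the SDP's a=fmtp line is a comma-separated list of base64-encoded NAL units. The strings below are made-up example values, but parseSPropParameterSets() is the real live555 helper used above:
char const* sprop = "Z0IACpZTBYmI,aMljiA=="; // example SPS,PPS pair, not from a real stream
unsigned numSPropRecords;
SPropRecord* sPropRecords = parseSPropParameterSets(sprop, numSPropRecords);
// sPropRecords[i].sPropBytes holds the decoded NAL unit; the low 5 bits of its
// first byte give the NAL type (7 = SPS, 8 = PPS).
delete[] sPropRecords; // caller owns the records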
void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString) {
Boolean success = False;
do {
UsageEnvironment& env = rtspClient->envir(); // alias
StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
if (resultCode != 0) {
env << *rtspClient << "Failed to start playing session: " << resultString << "\n";
break;
}
// Set a timer to be handled at the end of the stream's expected duration (if the stream does not already signal its end
// using a RTCP "BYE"). This is optional. If, instead, you want to keep the stream active - e.g., so you can later
// 'seek' back within it and do another RTSP "PLAY" - then you can omit this code.
// (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.)
if (scs.duration > 0) {
// my code: override the stream's own duration with the global "duration"
// (60 s by default), so each stream is torn down after that long. Note that
// this only takes effect for streams that report a positive duration:
scs.duration = duration;
unsigned const delaySlop = 0; // number of seconds extra to delay, after the stream's expected duration. (This is optional.)
scs.duration += delaySlop;
unsigned uSecsToDelay = (unsigned)(scs.duration*1000000);
scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient);
}
env << *rtspClient << "Started playing session";
if (scs.duration > 0) {
env << " (for up to " << scs.duration << " seconds)";
}
env << "...\n";
success = True;
} while (0);
delete[] resultString;
if (!success) {
// An unrecoverable error occurred with this stream.
shutdownStream(rtspClient);
}
}
// Implementation of the other event handlers:
void subsessionAfterPlaying(void* clientData) {
MediaSubsession* subsession = (MediaSubsession*)clientData;
RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr);
// Begin by closing this subsession's stream:
Medium::close(subsession->sink);
subsession->sink = NULL;
// Next, check whether *all* subsessions' streams have now been closed:
MediaSession& session = subsession->parentSession();
MediaSubsessionIterator iter(session);
while ((subsession = iter.next()) != NULL) {
if (subsession->sink != NULL) return; // this subsession is still active
}
// All subsessions' streams have now been closed, so shutdown the client:
shutdownStream(rtspClient);
}
void subsessionByeHandler(void* clientData) {
MediaSubsession* subsession = (MediaSubsession*)clientData;
RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr;
UsageEnvironment& env = rtspClient->envir(); // alias
env << *rtspClient << "Received RTCP \"BYE\" on \"" << *subsession << "\" subsession\n";
// Now act as if the subsession had closed:
subsessionAfterPlaying(subsession);
}
void streamTimerHandler(void* clientData) {
ourRTSPClient* rtspClient = (ourRTSPClient*)clientData;
StreamClientState& scs = rtspClient->scs; // alias
scs.streamTimerTask = NULL;
// Shut down the stream:
shutdownStream(rtspClient);
}
void shutdownStream(RTSPClient* rtspClient, int exitCode) {
UsageEnvironment& env = rtspClient->envir(); // alias
StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
// First, check whether any subsessions have still to be closed:
if (scs.session != NULL) {
Boolean someSubsessionsWereActive = False;
MediaSubsessionIterator iter(*scs.session);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
if (subsession->sink != NULL) {
Medium::close(subsession->sink);
subsession->sink = NULL;
if (subsession->rtcpInstance() != NULL) {
subsession->rtcpInstance()->setByeHandler(NULL, NULL); // in case the server sends a RTCP "BYE" while handling "TEARDOWN"
}
someSubsessionsWereActive = True;
}
}
if (someSubsessionsWereActive) {
// Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream.
// Don't bother handling the response to the "TEARDOWN".
rtspClient->sendTeardownCommand(*scs.session, NULL);
}
}
env << *rtspClient << "Closing the stream.\n";
Medium::close(rtspClient);
// Note that this will also cause this stream's "StreamClientState" structure to get reclaimed.
if (--rtspClientCount == 0) {
// The final stream has ended, so exit the application now.
// (Of course, if you're embedding this code into your own application, you might want to comment this out,
// and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".)
// exit(exitCode);
isend = false; // my code: stop doEventLoopzjk(), so maincall() can return the collected data
// return;
}
}
// A function that outputs a string that identifies each stream (for debugging output). Modify this if you wish:
UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient) {
return env << "[URL:\"" << rtspClient.url() << "\"]: ";
}
// A function that outputs a string that identifies each subsession (for debugging output). Modify this if you wish:
UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) {
return env << subsession.mediumName() << "/" << subsession.codecName();
}
The extern variables above are how the received data gets passed around.
I also use a map so that each URL's data ends up in the right slot. One oddity: my URLs end in *.mkv, but live555 seems to report them as *.mkv/, so before looking a URL up in the map I strip the trailing '/'. I don't know whether that's just a quirk of my setup.
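If you want that normalization in one place, here is a small standalone sketch (the helper name is mine, not part of the project) that both the registration in maincall() and the lookups above could share:
#include <string>
// Hypothetical helper: drop one trailing '/' so "rtsp://.../1.mkv/" and
// "rtsp://.../1.mkv" map to the same key in "inds".
static std::string urlKey(std::string url)
{
if (!url.empty() && url[url.size()-1] == '/') url.erase(url.size()-1);
return url;
}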
Below is the C++ entry function.
maincall.h, maincall.cpp
#include "call.h"
vector<string> maincall(vector<string> urls);
#include "maincall.h"
char eventLoopWatchVariable = 0;
//////////////////////////////////////////////////////////////////////////
// my variables (shared with class.cpp / method.cpp via extern)
vector<string> data; // one buffer of Annex-B H.264 bytes per URL
map<string, int> inds; // maps each input URL to its index in "data"
int nowind;
string nowstr;
int duration; // capture length in seconds
bool isend; // event-loop watch flag
//
//////////////////////////////////////////////////////////////////////////
vector<string> maincall(vector<string> urls) {
// Begin by setting up our usage environment:
TaskScheduler* scheduler = BasicTaskScheduler::createNew();
UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
/*
// We need at least one "rtsp://" URL argument:
if (argc < 2) {
usage(*env, argv[0]);
return 1;
}
// There are argc-1 URLs: argv[1] through argv[argc-1]. Open and start streaming each one:
for (int i = 1; i <= argc-1; ++i) {
openURL(*env, argv[0], argv[i]);
}
*/
//////////////////////////////////////////////////////////////////////////
// my code
for (size_t i = 0; i < urls.size(); i++)
{
string url = urls[i];
// Register this URL's output slot first, then kick off the (asynchronous)
// RTSP exchange; nothing is actually processed until the event loop starts:
nowind = data.size();
nowstr = "";
inds[url] = nowind;
data.push_back(nowstr);
openURL(*env, "play", url.c_str());
}
duration = 60; // collect for 60 seconds (used in continueAfterPLAY)
isend = true;
// my code end
//////////////////////////////////////////////////////////////////////////
// All subsequent activity takes place within the event loop:
// env->taskScheduler().doEventLoop(&eventLoopWatchVariable);
// This function call does not return, unless, at some point in time, "eventLoopWatchVariable" gets set to something non-zero.
zjk z;
z.doEventLoopzjk((BasicTaskScheduler0 *)scheduler); // runs until shutdownStream() clears "isend"
vector<string> results;
for (size_t i = 0; i < data.size(); i++)
{
results.push_back(data[i]);
}
env->reclaim(); env = NULL;
delete scheduler; scheduler = NULL;
return results;
}
Here I set duration to 60 s; you can change it to whatever value you want.
Finally, the main file: nothing but a call and the output.
#include "maincall.h"
#include <iostream>
using namespace std;
#include <vector>
#include <string>
int main()
{
vector<string> urls;
string a = "rtsp://127.0.0.1/1.mkv";
string b = "rtsp://127.0.0.1/2.mkv";
string c = "rtsp://127.0.0.1/3.mkv";
string d = "rtsp://127.0.0.1/4.mkv";
urls.push_back(a);
urls.push_back(b);
urls.push_back(c);
urls.push_back(d);
vector<string> results;
results = maincall(urls);
for (size_t i = 0; i < results.size(); i++)
{
const string& str = results[i];
char name[256];
snprintf(name, sizeof(name), "%d.264", (int)(i+1));
FILE *fp = fopen(name, "wb");
if (fp == NULL) continue; // skip this stream if the file can't be created
fwrite(str.c_str(), str.length(), 1, fp);
fclose(fp);
}
return 0;
}
References:
http://blog.csdn.net/fengshuiyue/article/details/11873843
http://m.blog.csdn.net/blog/dgyanyong/41695503
http://www.live555.com/liveMedia/faq.html#testRTSPClient-how-to-decode-data
https://github.com/yuvalk/demoLive555withFFMPEG/blob/master/RTSPFF.cpp#L606
http://www.cnblogs.com/gmapapi/archive/2013/01/18/2866405.html
http://m.blog.csdn.net/blog/zhangjikuan/38403401
over
enjoy!