本文整理汇总了C++中MediaSubsession::readSource方法的典型用法代码示例。如果您正苦于以下问题:C++ MediaSubsession::readSource方法的具体用法?C++ MediaSubsession::readSource怎么用?C++ MediaSubsession::readSource使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MediaSubsession
的用法示例。
在下文中一共展示了MediaSubsession::readSource方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: iter
Boolean MediaSession
::initiateByMediaType(char const* mimeType,
                      MediaSubsession*& resultSubsession,
                      int useSpecialRTPoffset) {
  // Scan this session's subsessions for one whose source delivers
  // media of type "mimeType"; initiate it on demand.
  resultSubsession = NULL;

  MediaSubsessionIterator iter(*this);
  for (MediaSubsession* subsession = iter.next(); subsession != NULL;
       subsession = iter.next()) {
    Boolean hadExistingSource = subsession->readSource() != NULL;
    if (!hadExistingSource) {
      // No source yet; try to create one for this subsession:
      if (!subsession->initiate(useSpecialRTPoffset)) return False;
    }

    // Keep this subsession only if its source's MIME type matches:
    if (strcmp(subsession->readSource()->MIMEtype(), mimeType) == 0) {
      resultSubsession = subsession;
      break; // use this one
    }

    // Not a match; undo any initiation that we just performed:
    if (!hadExistingSource) subsession->deInitiate();
  }

  if (resultSubsession == NULL) {
    envir().setResultMsg("Session has no usable media subsession");
    return False;
  }
  return True;
}
示例2: iter
AVIFileSink::~AVIFileSink() {
  completeOutputFile();

  // Stop streaming on, and dispose of, each subsession's IO state:
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    FramedSource* src = subsession->readSource();
    if (src != NULL) src->stopGettingFrames();

    AVISubsessionIOState* ioState
      = (AVISubsessionIOState*)(subsession->miscPtr);
    delete ioState; // deleting NULL is a harmless no-op
  }

  // Release the chain of index records:
  for (AVIIndexRecord* rec = fIndexRecordsHead; rec != NULL; ) {
    AVIIndexRecord* followingRec = rec->next();
    delete rec;
    rec = followingRec;
  }

  // Finally, close our output file:
  CloseOutputFile(fOutFid);
}
示例3: beginQOSMeasurement
void beginQOSMeasurement() {
// Set up a measurement record for each active subsession:
struct timeval startTime;
gettimeofday(&startTime, NULL);
nextQOSMeasurementUSecs = startTime.tv_sec*1000000 + startTime.tv_usec;
qosMeasurementRecord* qosRecordTail = NULL;
MediaSubsessionIterator iter(*session);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
RTPSource* src = subsession->rtpSource();
#ifdef SUPPORT_REAL_RTSP
if (session->isRealNetworksRDT) src = (RTPSource*)(subsession->readSource()); // hack
#endif
if (src == NULL) continue;
qosMeasurementRecord* qosRecord
= new qosMeasurementRecord(startTime, src);
if (qosRecordHead == NULL) qosRecordHead = qosRecord;
if (qosRecordTail != NULL) qosRecordTail->fNext = qosRecord;
qosRecordTail = qosRecord;
}
// Then schedule the first of the periodic measurements:
scheduleNextQOSMeasurement();
}
示例4: continuePlaying
Boolean AVIFileSink::continuePlaying() {
  // Ask for a frame from every input subsession that has both a
  // readable source and an associated IO state:
  Boolean anySubsessionActive = False;

  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    FramedSource* src = subsession->readSource();
    if (src == NULL) continue; // subsession was never initiated
    if (src->isCurrentlyAwaitingData()) continue; // a read is already pending

    AVISubsessionIOState* ioState
      = (AVISubsessionIOState*)(subsession->miscPtr);
    if (ioState == NULL) continue;

    anySubsessionActive = True;
    src->getNextFrame(ioState->fBuffer->dataEnd(),
                      ioState->fBuffer->bytesAvailable(),
                      afterGettingFrame, ioState,
                      onSourceClosure, ioState);
  }

  if (!anySubsessionActive) {
    envir().setResultMsg("No subsessions are currently active");
    return False;
  }
  return True;
}
示例5: fOurSink
AVISubsessionIOState::AVISubsessionIOState(AVIFileSink& sink,
                                           MediaSubsession& subsession)
  : fOurSink(sink), fOurSubsession(subsession),
    fMaxBytesPerSecond(0), fIsVideo(False), fIsAudio(False), fIsByteSwappedAudio(False), fNumFrames(0) {
  // Allocate the working buffer; when packet-loss compensation is
  // enabled, also keep a second buffer holding the previous data:
  fBuffer = new SubsessionBuffer(fOurSink.fBufferSize);
  fPrevBuffer = sink.fPacketLossCompensate
    ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL;

  // This state is considered "active" iff the subsession already has
  // a read source at construction time:
  fOurSourceIsActive = subsession.readSource() != NULL;

  // No frame has been seen yet:
  fPrevPresentationTime.tv_sec = 0;
  fPrevPresentationTime.tv_usec = 0;
}
示例6: MediaNet_Thread
//.........这里部分代码省略.........
madeProgress = True;
}
}
}
if (!madeProgress)
break;
// Perform additional 'setup' on each subsession, before playing them:
pThis->SetRtspStatus( RTSPStatus_Setup );
unsigned nResponseCode = NULL;
BOOL bSetupSuccess = setupStreams( &nResponseCode );
if ( !bSetupSuccess )
{
// setup失败!
if ( RTSPResp_Error_Server_Full == nResponseCode )
{
pThis->SetRtspStatus( RTSPStatus_Error_Server_Full );
}
else
{
pThis->SetRtspStatus( RTSPStatus_Idle );
}
break;
}
// Create output files:
if ( true )
{
// Create and start "FileSink"s for each subsession:
madeProgress = False;
iter.reset();
while ((subsession = iter.next()) != NULL)
{
if (subsession->readSource() == NULL) continue; // was not initiated
MediaSink *pDecodeSink = 0;
if (strcmp(subsession->mediumName(), "video") == 0 )
{
int nBandWidth = subsession->GetBandWidth();
if ( strcmp(subsession->codecName(), "MP4V-ES") == 0 )
{
CMpeg4StreamDecodeSink *pMsds = CMpeg4StreamDecodeSink::CreateNew( *env, 20000, nBandWidth );
pDecodeSink = pMsds;
}
else if ( strcmp( subsession->codecName(), "H264" ) == 0 )
{
CH264StreamDecodeSink *pHsds = CH264StreamDecodeSink::CreateNew( *env, 20000, nBandWidth );
pDecodeSink = pHsds;
}
else
{
continue;
}
}
subsession->sink = pDecodeSink;
if (subsession->sink == NULL)
{
*env << "Failed to create CH264StreamDecodeSink \"" << "\n";
}
subsession->sink->startPlaying(*(subsession->readSource()),
subsessionAfterPlaying,
示例7: printQOSData
// Print the accumulated QOS (quality-of-service) statistics for every
// subsession of the current session, as tab-separated "key<TAB>value"
// lines bracketed by begin_/end_QOS_statistics markers.
// A non-zero "exitCode" forces a failure statusCode so that the
// availability figures below reflect the error.
// NOTE(review): relies on file-scope globals: env, statusCode, session,
// qosRecordHead and qosMeasurementIntervalMS.
void printQOSData(int exitCode) {
if (exitCode != 0 && statusCode == 0) statusCode = 2;
*env << "begin_QOS_statistics\n";
// statusCode == 1 means the server itself was unreachable:
*env << "server_availability\t" << (statusCode == 1 ? 0 : 100) << "\n";
*env << "stream_availability\t" << (statusCode == 0 ? 100 : 0) << "\n";
// Print out stats for each active subsession.  "qosRecordHead" was
// built in the same iteration order as the subsession iterator below,
// so the two sequences are walked in lock-step:
qosMeasurementRecord* curQOSRecord = qosRecordHead;
if (session != NULL) {
MediaSubsessionIterator iter(*session);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
RTPSource* src = subsession->rtpSource();
#ifdef SUPPORT_REAL_RTSP
if (session->isRealNetworksRDT) src = (RTPSource*)(subsession->readSource()); // hack
#endif
// Subsessions without a source were skipped during measurement setup,
// so skipping them here keeps curQOSRecord aligned:
if (src == NULL) continue;
*env << "subsession\t" << subsession->mediumName()
<< "/" << subsession->codecName() << "\n";
unsigned numPacketsReceived = 0, numPacketsExpected = 0;
if (curQOSRecord != NULL) {
numPacketsReceived = curQOSRecord->totNumPacketsReceived;
numPacketsExpected = curQOSRecord->totNumPacketsExpected;
}
*env << "num_packets_received\t" << numPacketsReceived << "\n";
*env << "num_packets_lost\t" << numPacketsExpected - numPacketsReceived << "\n";
if (curQOSRecord != NULL) {
// Elapsed measurement time, in seconds (sec + usec parts combined):
unsigned secsDiff = curQOSRecord->measurementEndTime.tv_sec
- curQOSRecord->measurementStartTime.tv_sec;
int usecsDiff = curQOSRecord->measurementEndTime.tv_usec
- curQOSRecord->measurementStartTime.tv_usec;
double measurementTime = secsDiff + usecsDiff/1000000.0;
*env << "elapsed_measurement_time\t" << measurementTime << "\n";
*env << "kBytes_received_total\t" << curQOSRecord->kBytesTotal << "\n";
*env << "measurement_sampling_interval_ms\t" << qosMeasurementIntervalMS << "\n";
if (curQOSRecord->kbits_per_second_max == 0) {
// special case: we didn't receive any data:
*env <<
"kbits_per_second_min\tunavailable\n"
"kbits_per_second_ave\tunavailable\n"
"kbits_per_second_max\tunavailable\n";
} else {
*env << "kbits_per_second_min\t" << curQOSRecord->kbits_per_second_min << "\n";
// Average bitrate = total bytes * 8 / elapsed time (guard div-by-zero):
*env << "kbits_per_second_ave\t"
<< (measurementTime == 0.0 ? 0.0 : 8*curQOSRecord->kBytesTotal/measurementTime) << "\n";
*env << "kbits_per_second_max\t" << curQOSRecord->kbits_per_second_max << "\n";
}
*env << "packet_loss_percentage_min\t" << 100*curQOSRecord->packet_loss_fraction_min << "\n";
// Overall loss fraction over the whole run; if nothing was expected,
// treat it as total (100%) loss:
double packetLossFraction = numPacketsExpected == 0 ? 1.0
: 1.0 - numPacketsReceived/(double)numPacketsExpected;
if (packetLossFraction < 0.0) packetLossFraction = 0.0;
*env << "packet_loss_percentage_ave\t" << 100*packetLossFraction << "\n";
*env << "packet_loss_percentage_max\t"
<< (packetLossFraction == 1.0 ? 100.0 : 100*curQOSRecord->packet_loss_fraction_max) << "\n";
#ifdef SUPPORT_REAL_RTSP
if (session->isRealNetworksRDT) {
// RealNetworks RDT keeps its own inter-packet-gap stats on the source:
RealRDTSource* rdt = (RealRDTSource*)src;
*env << "inter_packet_gap_ms_min\t" << rdt->minInterPacketGapUS()/1000.0 << "\n";
struct timeval totalGaps = rdt->totalInterPacketGaps();
double totalGapsMS = totalGaps.tv_sec*1000.0 + totalGaps.tv_usec/1000.0;
unsigned totNumPacketsReceived = rdt->totNumPacketsReceived();
*env << "inter_packet_gap_ms_ave\t"
<< (totNumPacketsReceived == 0 ? 0.0 : totalGapsMS/totNumPacketsReceived) << "\n";
*env << "inter_packet_gap_ms_max\t" << rdt->maxInterPacketGapUS()/1000.0 << "\n";
} else {
#endif
// Standard RTP: read gap stats from the reception-stats database.
RTPReceptionStatsDB::Iterator statsIter(src->receptionStatsDB());
// Assume that there's only one SSRC source (usually the case):
RTPReceptionStats* stats = statsIter.next(True);
if (stats != NULL) {
*env << "inter_packet_gap_ms_min\t" << stats->minInterPacketGapUS()/1000.0 << "\n";
struct timeval totalGaps = stats->totalInterPacketGaps();
double totalGapsMS = totalGaps.tv_sec*1000.0 + totalGaps.tv_usec/1000.0;
unsigned totNumPacketsReceived = stats->totNumPacketsReceived();
*env << "inter_packet_gap_ms_ave\t"
<< (totNumPacketsReceived == 0 ? 0.0 : totalGapsMS/totNumPacketsReceived) << "\n";
*env << "inter_packet_gap_ms_max\t" << stats->maxInterPacketGapUS()/1000.0 << "\n";
}
#ifdef SUPPORT_REAL_RTSP
}
#endif
curQOSRecord = curQOSRecord->fNext;
}
}
}
*env << "end_QOS_statistics\n";
// Presumably qosMeasurementRecord's destructor deletes fNext, so this
// chains through and frees the whole list — TODO confirm.
delete qosRecordHead;
}
示例8: iter
//.........这里部分代码省略.........
int audiofound = 0, videofound = 0;
// Create RTP receivers (sources) for each subsession:
MediaSubsessionIterator iter(*mediaSession);
MediaSubsession* subsession;
unsigned desiredReceiveBufferSize;
while ((subsession = iter.next()) != NULL) {
// Ignore any subsession that's not audio or video:
if (strcmp(subsession->mediumName(), "audio") == 0) {
if (audiofound) {
fprintf(stderr, "Additional subsession \"audio/%s\" skipped\n", subsession->codecName());
continue;
}
desiredReceiveBufferSize = 100000;
} else if (strcmp(subsession->mediumName(), "video") == 0) {
if (videofound) {
fprintf(stderr, "Additional subsession \"video/%s\" skipped\n", subsession->codecName());
continue;
}
desiredReceiveBufferSize = 2000000;
} else {
continue;
}
if (rtsp_port)
subsession->setClientPortNum (rtsp_port);
if (!subsession->initiate()) {
fprintf(stderr, "Failed to initiate \"%s/%s\" RTP subsession: %s\n", subsession->mediumName(), subsession->codecName(), env->getResultMsg());
} else {
fprintf(stderr, "Initiated \"%s/%s\" RTP subsession on port %d\n", subsession->mediumName(), subsession->codecName(), subsession->clientPortNum());
// Set the OS's socket receive buffer sufficiently large to avoid
// incoming packets getting dropped between successive reads from this
// subsession's demuxer. Depending on the bitrate(s) that you expect,
// you may wish to tweak the "desiredReceiveBufferSize" values above.
int rtpSocketNum = subsession->rtpSource()->RTPgs()->socketNum();
int receiveBufferSize
= increaseReceiveBufferTo(*env, rtpSocketNum,
desiredReceiveBufferSize);
if (verbose > 0) {
fprintf(stderr, "Increased %s socket receive buffer to %d bytes \n",
subsession->mediumName(), receiveBufferSize);
}
if (rtspClient != NULL) {
// Issue a RTSP "SETUP" command on the chosen subsession:
if (!rtspClient->setupMediaSubsession(*subsession, False,
rtsp_transport_tcp)) break;
if (!strcmp(subsession->mediumName(), "audio"))
audiofound = 1;
if (!strcmp(subsession->mediumName(), "video"))
videofound = 1;
}
}
}
if (rtspClient != NULL) {
// Issue a RTSP aggregate "PLAY" command on the whole session:
if (!rtspClient->playMediaSession(*mediaSession)) break;
} else if (sipClient != NULL) {
sipClient->sendACK(); // to start the stream flowing
}
// Now that the session is ready to be read, do additional
// MPlayer codec-specific initialization on each subsession:
iter.reset();
while ((subsession = iter.next()) != NULL) {
if (subsession->readSource() == NULL) continue; // not reading this
unsigned flags = 0;
if (strcmp(subsession->mediumName(), "audio") == 0) {
rtpState->audioBufferQueue
= new ReadBufferQueue(subsession, demuxer, "audio");
rtpState->audioBufferQueue->otherQueue = &(rtpState->videoBufferQueue);
rtpCodecInitialize_audio(demuxer, subsession, flags);
} else if (strcmp(subsession->mediumName(), "video") == 0) {
rtpState->videoBufferQueue
= new ReadBufferQueue(subsession, demuxer, "video");
rtpState->videoBufferQueue->otherQueue = &(rtpState->audioBufferQueue);
rtpCodecInitialize_video(demuxer, subsession, flags);
}
rtpState->flags |= flags;
}
success = True;
} while (0);
if (!success) return NULL; // an error occurred
// Hack: If audio and video are demuxed together on a single RTP stream,
// then create a new "demuxer_t" structure to allow the higher-level
// code to recognize this:
if (demux_is_multiplexed_rtp_stream(demuxer)) {
stream_t* s = new_ds_stream(demuxer->video);
demuxer_t* od = demux_open(opts, s, DEMUXER_TYPE_UNKNOWN,
opts->audio_id, opts->video_id, opts->sub_id,
NULL);
demuxer = new_demuxers_demuxer(od, od, od);
}
return demuxer;
}
示例9: handSetup
bool MtkRTSPClient::handSetup(char* resultString)
{
CHECK_NULL_COND(session, false);
CHECK_NULL_COND(rtsp::env, false);
bool bSuccess = false;
// Then, setup the "RTPSource"s for the session:
MediaSubsessionIterator iter(*(session));
MediaSubsession *subsession = NULL;
while ((subsession = iter.next()) != NULL)
{
if (subsession->readSource() == NULL)
{
LOG_ERR("warning");
continue; // was not initiated
}
if (subsession->sink != NULL)/*already be set*/
{
continue;
}
unsigned int type = getBufType(subsession);
if (type == 0)
{
LOG_ERR("error type=%d", type);
continue;
}
{
iSetupCount--;
/*set mediay info*/
setMediaInfo(subsession, type);
}
CmpbSink *sink = NULL;
if ((type != mediatype_audio) && (strcmp(subsession->codecName(), "H264") == 0))
{
sink = CmpbH264Sink::createNew(*env, *subsession, type, fileSinkBufferSize);
}
else if ((type == mediatype_audio) &&
((stMediaInfo.audioCodec == MEDIACODEC_AC3) ||
(stMediaInfo.audioCodec == MEDIACODEC_EAC3) ||
(stMediaInfo.audioCodec == MEDIACODEC_MPEG4_GENERIC)))
{
sink = CmpbAACSink::createNew(*env, *subsession, type, fileSinkBufferSize);
}
else if ((type == mediatype_audio) && (stMediaInfo.audioCodec == MEDIACODEC_MP4A_LATM))
{
sink = CmpbLATMSink::createNew(*env, *subsession, type, fileSinkBufferSize);
}
else
{
sink = CmpbSink::createNew(*env, *subsession, type, fileSinkBufferSize);
}
subsession->sink = sink;
if (subsession->sink == NULL)
{
LOG_ERR("error!");
}
else
{
#if 0 /*this should be remove to cmpb sink*/
if ((type != mediatype_audio) && (strcmp(subsession->codecName(), "MP4V-ES") == 0)
&& (subsession->fmtp_config() != NULL))
{
// For MPEG-4 video RTP streams, the 'config' information
// from the SDP description contains useful VOL etc. headers.
// Insert this data at the front of the output file:
unsigned configLen;
unsigned char* configData
= parseGeneralConfigStr(subsession->fmtp_config(), configLen);
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
sink->sendData(configData, configLen, timeNow);
delete[] configData;
}
#endif
subsession->sink->startPlaying(*(subsession->readSource()),
subsessionAfterPlaying,
subsession);
// Also set a handler to be called if a RTCP "BYE" arrives
// for this subsession:
if (subsession->rtcpInstance() != NULL)
{
subsession->rtcpInstance()->setByeHandler(subsessionAfterPlaying, subsession);
}
bSuccess = true;
}
break;
}
if (iSetupCount == 0)
{
mediaInfoReady();
}
//.........这里部分代码省略.........
示例10: handDescription
bool MtkRTSPClient::handDescription(char* resultString)
{
CHECK_NULL_COND(resultString, false);
char* sdpDescription = resultString;
//LOG_DEBUG("SDP description:%s", sdpDescription);
// Create a media session object from this SDP description:
session = MediaSession::createNew(*env, sdpDescription);
if (session == NULL)
{
LOG_ERR("Failed to create a MediaSession object from the SDP description: %s", env->getResultMsg());
return false;
}
if (!session->hasSubsessions())
{
LOG_ERR("This session has no media subsessions (i.e., \"m=\" lines)");
Medium::close(session);
session = NULL;
return false;
}
/*
*TO DO:GET THE TIME RANGE
*/
fStartTime = session->playStartTime();
if (fStartTime < 0)
{
fStartTime = 0.0f;
}
fEndTime= session->playEndTime();
if (fEndTime <= 0)
{
fEndTime = -1.0f;
}
{
/*send setup request count*/
iSetupCount = 0;
}
// Then, setup the "RTPSource"s for the session:
MediaSubsessionIterator iter(*(session));
MediaSubsession *subsession = NULL;
RtspReqSender *senderSave = pRtspReqSender->getNext();
if (senderSave == NULL)
{
LOG_ERR("error");
return false;
}
CmdSenderDecorator *senderMove = pRtspReqSender;
while ((subsession = iter.next()) != NULL)
{
if (!subsession->initiate(-1))
{
LOG_ERR("warning");
continue;
}
if (subsession->rtpSource() != NULL)
{
#if 0
// Because we're saving the incoming data, rather than playing
// it in real time, allow an especially large time threshold
// (1 second) for reordering misordered incoming packets:
unsigned const thresh = 1000000; // 1 second
subsession->rtpSource()->setPacketReorderingThresholdTime(thresh);
#endif
#if 0
// Set the RTP source's OS socket buffer size as appropriate - either if we were explicitly asked (using -B),
// or if the desired FileSink buffer size happens to be larger than the current OS socket buffer size.
// (The latter case is a heuristic, on the assumption that if the user asked for a large FileSink buffer size,
// then the input data rate may be large enough to justify increasing the OS socket buffer size also.)
int socketNum = subsession->rtpSource()->RTPgs()->socketNum();
unsigned curBufferSize = getReceiveBufferSize(*env, socketNum);
LOG_DEBUG("old receive buffer size:%d", curBufferSize);
if (fileSinkBufferSize > curBufferSize)
{
unsigned newBufferSize = setReceiveBufferTo(*env, socketNum, fileSinkBufferSize);
LOG_DEBUG("new receive buffer size:%d", newBufferSize);
}
#else
int socketNum = subsession->rtpSource()->RTPgs()->socketNum();
unsigned newBufferSize = setReceiveBufferTo(*env, socketNum, maxBufSize);
LOG_DEBUG("new receive buffer size:%d", newBufferSize);
#endif
}
if (subsession->readSource() == NULL)
{
LOG_ERR("warning");
continue; // was not initiated
}
/*
*TO DO:SET UP SUBSESSION
*/
//.........这里部分代码省略.........
示例11: OpenStream
//.........这里部分代码省略.........
if (desiredPortNum != 0)
{
subsession->setClientPortNum(desiredPortNum);
desiredPortNum += 2;
}
if (createReceivers)
{
if (!subsession->initiate(simpleRTPoffsetArg))
{
XBMC->Log(LOG_DEBUG, "Unable to create receiver for %s %s %s" ,subsession->mediumName(),subsession->codecName(),m_env->getResultMsg());
}
else
{
XBMC->Log(LOG_DEBUG, "Created receiver for type=%s codec=%s ports: %d %d " ,subsession->mediumName(),subsession->codecName(),subsession->clientPortNum(),subsession->clientPortNum()+1 );
madeProgress = True;
if (subsession->rtpSource() != NULL)
{
// Because we're saving the incoming data, rather than playing
// it in real time, allow an especially large time threshold
// (1 second) for reordering misordered incoming packets:
int socketNum= subsession->rtpSource()->RTPgs()->socketNum();
XBMC->Log(LOG_DEBUG, "rtsp:increaseReceiveBufferTo to 2000000 for s:%d",socketNum);
increaseReceiveBufferTo( *m_env, socketNum, 2000000 );
unsigned const thresh = 1000000; // 1 second
subsession->rtpSource()->setPacketReorderingThresholdTime(thresh);
if (socketInputBufferSize > 0)
{
// Set the RTP source's input buffer size as specified:
int socketNum= subsession->rtpSource()->RTPgs()->socketNum();
unsigned curBufferSize= getReceiveBufferSize(*m_env, socketNum);
unsigned newBufferSize= setReceiveBufferTo(*m_env, socketNum, socketInputBufferSize);
XBMC->Log(LOG_DEBUG, "Changed socket receive buffer size for the %s %s %d %d",
subsession->mediumName(),subsession->codecName(),curBufferSize,newBufferSize);
}
}
}
}
else
{
if (subsession->clientPortNum() == 0)
{
XBMC->Log(LOG_DEBUG, "No client port was specified for the %s %s",subsession->mediumName(),subsession->codecName());
}
else
{
madeProgress = True;
}
}
}
if (!madeProgress)
{
shutdown();
return false;
}
// Perform additional 'setup' on each subsession, before playing them:
if (!setupStreams())
{
return false;
}
// Create output files:
// Create and start "FileSink"s for each subsession:
madeProgress = False;
iter.reset();
while ((subsession = iter.next()) != NULL)
{
if (subsession->readSource() == NULL) continue; // was not initiated
// Mediaportal:
CMemorySink* fileSink= CMemorySink::createNew(*m_env, *m_buffer, fileSinkBufferSize);
// XBMC test via file:
//FileSink* fileSink = FileSink::createNew(*m_env, m_outFileName, fileSinkBufferSize, false); //oneFilePerFrame
subsession->sink = fileSink;
if (subsession->sink == NULL)
{
XBMC->Log(LOG_DEBUG, "Failed to create FileSink %s",m_env->getResultMsg());
shutdown();
return false;
}
XBMC->Log(LOG_DEBUG, "Created output sink: %s", m_outFileName);
subsession->sink->startPlaying(*(subsession->readSource()),my_subsessionAfterPlaying,subsession);
// Also set a handler to be called if a RTCP "BYE" arrives
// for this subsession:
if (subsession->rtcpInstance() != NULL)
{
subsession->rtcpInstance()->setByeHandler(my_subsessionByeHandler,subsession);
}
madeProgress = True;
}
return true;
}
示例12: iter
// Find (and, if needed, initiate) a subsession of this session whose
// source delivers media of type "mimeType".
// Two outcomes are possible:
//  - Normal case: "resultSubsession" is set to a single matching
//    subsession (and "resultMultiSource" stays NULL).
//  - MCT SLAP case: the matching subsessions form a multi-source group;
//    then "resultMultiSourceSessionId" is set to the group's id and
//    "resultMultiSource" to a PrioritizedRTPStreamSelector that merges
//    all of the group's RTP streams.
// Returns False (with a result message in the environment) on failure.
Boolean MediaSession
::initiateByMediaType(char const* mimeType,
MediaSubsession*& resultSubsession,
PrioritizedRTPStreamSelector*& resultMultiSource,
int& resultMultiSourceSessionId,
int useSpecialRTPoffset) {
// Look through this session's subsessions for media that match "mimeType"
resultSubsession = NULL;
resultMultiSource = NULL;
resultMultiSourceSessionId = 0;
unsigned maxStaggerSeconds = 0;
MediaSubsessionIterator iter(*this);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
if (resultMultiSourceSessionId != 0
&& subsession->mctSLAPSessionId() != resultMultiSourceSessionId) {
// We're using a multi-source SLAP session, but this subsession
// isn't part of it
continue;
}
Boolean wasAlreadyInitiated = subsession->readSource() != NULL;
if (!wasAlreadyInitiated) {
// Try to create a source for this subsession:
if (!subsession->initiate(useSpecialRTPoffset)) return False;
}
// Make sure the source's MIME type is one that we handle:
if (strcmp(subsession->readSource()->MIMEtype(), mimeType) != 0) {
// Not a match: undo any initiation we just did, and keep looking.
if (!wasAlreadyInitiated) subsession->deInitiate();
continue;
}
if (subsession->mctSLAPSessionId() == 0) {
// Normal case: a single session
resultSubsession = subsession;
break; // use this
} else {
// Special case: a multi-source SLAP session.  Keep iterating to
// collect all group members, tracking the largest stagger value:
resultMultiSourceSessionId = subsession->mctSLAPSessionId();
unsigned subsessionStaggerSeconds = subsession->mctSLAPStagger();
if (subsessionStaggerSeconds > maxStaggerSeconds) {
maxStaggerSeconds = subsessionStaggerSeconds;
}
}
}
if (resultSubsession == NULL && resultMultiSourceSessionId == 0) {
envir().setResultMsg("Session has no usable media subsession");
return False;
}
if (resultMultiSourceSessionId != 0) {
// We have a multi-source MCT SLAP session; create a selector for it:
unsigned seqNumStagger = computeSeqNumStagger(maxStaggerSeconds);
resultMultiSource
= PrioritizedRTPStreamSelector::createNew(envir(), seqNumStagger);
if (resultMultiSource == NULL) return False;
// Note: each subsession has its own RTCP instance; we don't return them
// Then run through the subsessions again, adding each of the sources:
iter.reset();
while ((subsession = iter.next()) != NULL) {
if (subsession->mctSLAPSessionId() == resultMultiSourceSessionId) {
resultMultiSource->addInputRTPStream(subsession->rtpSource(),
subsession->rtcpInstance());
}
}
}
return True;
}
示例13: main
//.........这里部分代码省略.........
// Create a "QuickTimeFileSink", to write to 'stdout':
qtOut = QuickTimeFileSink::createNew(*env, *session, "stdout",
fileSinkBufferSize,
movieWidth, movieHeight,
movieFPS,
packetLossCompensate,
syncStreams,
generateHintTracks,
generateMP4Format);
if (qtOut == NULL) {
*env << "Failed to create QuickTime file sink for stdout: " << env->getResultMsg();
shutdown();
}
qtOut->startPlaying(sessionAfterPlaying, NULL);
} else if (outputAVIFile) {
// Create an "AVIFileSink", to write to 'stdout':
aviOut = AVIFileSink::createNew(*env, *session, "stdout",
fileSinkBufferSize,
movieWidth, movieHeight,
movieFPS,
packetLossCompensate);
if (aviOut == NULL) {
*env << "Failed to create AVI file sink for stdout: " << env->getResultMsg();
shutdown();
}
aviOut->startPlaying(sessionAfterPlaying, NULL);
} else {
// Create and start "FileSink"s for each subsession:
madeProgress = False;
iter.reset();
while ((subsession = iter.next()) != NULL) {
if (subsession->readSource() == NULL) continue; // was not initiated
// Create an output file for each desired stream:
char outFileName[1000];
if (singleMedium == NULL) {
// Output file name is
// "<filename-prefix><medium_name>-<codec_name>-<counter>"
static unsigned streamCounter = 0;
snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d",
fileNamePrefix, subsession->mediumName(),
subsession->codecName(), ++streamCounter);
} else {
sprintf(outFileName, "stdout");
}
FileSink* fileSink;
if (strcmp(subsession->mediumName(), "audio") == 0 &&
(strcmp(subsession->codecName(), "AMR") == 0 ||
strcmp(subsession->codecName(), "AMR-WB") == 0)) {
// For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
fileSink = AMRAudioFileSink::createNew(*env, outFileName,
fileSinkBufferSize, oneFilePerFrame);
} else if (strcmp(subsession->mediumName(), "video") == 0 &&
(strcmp(subsession->codecName(), "H264") == 0)) {
// For H.264 video stream, we use a special sink that insert start_codes:
fileSink = H264VideoFileSink::createNew(*env, outFileName,
fileSinkBufferSize, oneFilePerFrame);
} else {
// Normal case:
fileSink = FileSink::createNew(*env, outFileName,
fileSinkBufferSize, oneFilePerFrame);
}
subsession->sink = fileSink;
if (subsession->sink == NULL) {