本文整理汇总了C++中GetDuration函数的典型用法代码示例。如果您正苦于以下问题:C++ GetDuration函数的具体用法?C++ GetDuration怎么用?C++ GetDuration使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了GetDuration函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: GetDuration
//--------------------------------------------------------------------------------------
// Name: ReadSampleRaw()
// Desc: Reads data from the audio file. No endianness conversion is performed.
//--------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------
// Name: ReadSampleRaw()
// Desc: Reads raw bytes from the audio file's data chunk, clamped to the
//       chunk's total size. No endianness conversion is performed.
//       dwPosition  - byte offset into the data chunk to start reading at
//       pBuffer     - destination buffer
//       dwBufferSize- number of bytes requested
//       pdwRead     - optional out: number of bytes actually read
//--------------------------------------------------------------------------------------
HRESULT WaveFile::ReadSampleRaw( DWORD dwPosition, VOID* pBuffer,
DWORD dwBufferSize, DWORD* pdwRead ) const
{
// Don't read past the end of the data chunk.
// BUG FIX: the original test "dwPosition + dwBufferSize > dwDuration" could
// overflow DWORD addition, and when dwPosition > dwDuration the clamp
// "dwDuration - dwPosition" underflowed to a huge unsigned value, requesting
// a massive read. Clamp without any wrap-prone arithmetic instead.
DWORD dwDuration;
GetDuration( &dwDuration );
if( dwPosition >= dwDuration )
    dwBufferSize = 0;                          // start is at/past the end: nothing to read
else if( dwBufferSize > dwDuration - dwPosition )
    dwBufferSize = dwDuration - dwPosition;    // safe: dwPosition < dwDuration here
HRESULT hr = S_OK;
if( dwBufferSize )
    hr = m_DataChunk.ReadData( ( LONG )dwPosition, pBuffer, dwBufferSize, NULL );
if( pdwRead )
    *pdwRead = dwBufferSize;
return hr;
}
示例2: GetDuration
Duration
Timeline::GetNaturalDuration (Clock *clock)
{
	// Resolve this timeline's effective duration for the given clock:
	// an explicitly assigned duration wins, while Duration::Automatic
	// defers to the subclass-specific GetNaturalDurationCore() computation.
	Duration *assigned = GetDuration ();
	if (*assigned != Duration::Automatic)
		return *assigned;

	return GetNaturalDurationCore (clock);
}
示例3: GetSpeedRatio
double DoubleAnimation::GetCurrentValue()
{
    // Advance the animated value by one step and return it.
    // Steps from _dFrom toward _dTo (or back again when reversing);
    // once the target is reached the animation either flips into
    // reverse (when auto-reverse is enabled) or ends.
    if (_isEndOf)
    {
        return 0;
    }

    const double step = (_dTo - _dFrom) * GetSpeedRatio() / GetDuration();

    if (_bReverse)
    {
        _dCurrent -= step;
        // Reached (or passed) the start value, from either direction?
        const bool atStart = (step > 0 && _dCurrent <= _dFrom)
                          || (step < 0 && _dCurrent >= _dFrom);
        if (atStart)
        {
            _dCurrent = _dFrom;
            _isEndOf = true;
        }
        return _dCurrent;
    }

    _dCurrent += step;
    // Reached (or passed) the target value, from either direction?
    const bool atTarget = (step > 0 && _dCurrent >= _dTo)
                       || (step < 0 && _dCurrent <= _dTo);
    if (atTarget)
    {
        _dCurrent = _dTo;
        if (GetAutoReverse())
        {
            _bReverse = true;   // bounce back toward _dFrom
        }
        else
        {
            _isEndOf = true;    // one-shot animation is done
        }
    }
    return _dCurrent;
}
示例4: WXUNUSED
// Pre-computes the per-tone and per-silence sample counts for rendering a
// DTMF sequence, then redistributes rounding leftovers so the generated
// audio spans exactly the selected sample range.
// Returns true unconditionally; state is left in the member variables
// (numSamplesSequence/Tone/Silence, diff, curSeqPos, isTone, numRemaining).
bool EffectDtmf::ProcessInitialize(sampleCount WXUNUSED(totalLen), ChannelNames WXUNUSED(chanMap))
{
double duration = GetDuration();
// all dtmf sequence durations in samples from seconds
// MJS: Note that mDuration is in seconds but will have been quantised to the units of the TTC.
// If this was 'samples' and the project rate was lower than the track rate,
// extra samples may get created as mDuration may now be > mT1 - mT0;
// However we are making our best efforts at creating what was asked for.
// Round the selection endpoints to whole samples before differencing, so
// numSamplesSequence is the exact number of samples selected.
auto nT0 = (sampleCount)floor(mT0 * mSampleRate + 0.5);
auto nT1 = (sampleCount)floor((mT0 + duration) * mSampleRate + 0.5);
numSamplesSequence = nT1 - nT0; // needs to be exact number of samples selected
//make under-estimates if anything, and then redistribute the few remaining samples
// floor() deliberately under-estimates both block sizes; the shortfall is
// accumulated into 'diff' below and spread back across the blocks.
numSamplesTone = sampleCount( floor(dtmfTone * mSampleRate) );
numSamplesSilence = sampleCount( floor(dtmfSilence * mSampleRate) );
// recalculate the sum, and spread the difference - due to approximations.
// Since diff should be in the order of "some" samples, a division (resulting in zero)
// is not sufficient, so we add the additional remaining samples in each tone/silence block,
// at least until available.
// A sequence of N tones has N tone blocks and N-1 silence blocks.
diff = numSamplesSequence - (dtmfNTones*numSamplesTone) - (dtmfNTones-1)*numSamplesSilence;
while (diff > 2*dtmfNTones - 1) { // more than one per thingToBeGenerated
// in this case, both numSamplesTone and numSamplesSilence would change, so it makes sense
// to recalculate diff here, otherwise just keep the value we already have
// should always be the case that dtmfNTones>1, as if 0, we don't even start processing,
// and with 1 there is no difference to spread (no silence slot)...
wxASSERT(dtmfNTones > 1);
numSamplesTone += (diff/(dtmfNTones));
numSamplesSilence += (diff/(dtmfNTones-1));
diff = numSamplesSequence - (dtmfNTones*numSamplesTone) - (dtmfNTones-1)*numSamplesSilence;
}
wxASSERT(diff >= 0); // should never be negative
// Reset the per-run generation cursor: curSeqPos indexes dtmfSequence and
// -1 means "before the first character"; the generator alternates
// tone/silence starting with a tone.
curSeqPos = -1; // pointer to string in dtmfSequence
isTone = false;
numRemaining = 0;
return true;
}
示例5: LOG
// Reports the current media position (microseconds) via aMediaTime.
// Queries the MMF controller only when the cached iMediaTime is unknown or
// playback is in progress; otherwise the cached value is returned.
// On controller error the cached time is invalidated to KTimeUnknown.
void CMMAMMFPlayerBase::GetMediaTime(TInt64* aMediaTime)
{
LOG(EJavaMMAPI, EInfo, "CMMAMMFPlayerBase::GetMediaTime +");
TTimeIntervalMicroSeconds position(0);
if (iMediaTime == KTimeUnknown || iState == EStarted)
{
// The controller must be in the PRIMED or PLAYING state
TInt error(iController.GetPosition(position));
if (error == KErrNone)
{
TInt64 newTime = position.Int64();
LOG1(EJavaMMAPI, EInfo, "CMMAMMFPlayerBase::GetMediaTime iController.GetPosition : %d", newTime);
// Sanity check for media time going backwards or beyond the
// duration.
// Some native controls may return zero media time for
// a few moments just before playback will complete.
if (newTime < iMediaTime ||
(iDuration > 0 && newTime > iDuration))
{
LOG(EJavaMMAPI, EInfo, "CMMAMMFPlayerBase::GetMediaTime.GetDuration ");
// Implausible position reported: fall back to the clip duration
// (GetDuration writes it into iMediaTime).
GetDuration(&iMediaTime);
}
else
{
LOG(EJavaMMAPI, EInfo, "CMMAMMFPlayerBase::GetMediaTime.else ");
// set return value
iMediaTime = newTime;
}
}
else
{
LOG1(EJavaMMAPI, EInfo, "CMMAMMFPlayerBase::GetMediaTime: error=%d, returning TIME_UNKNOWN", error);
// cannot get media time
iMediaTime = KTimeUnknown;
}
}
// Hand back whichever value we ended up with (fresh, fallback, or cached).
*aMediaTime = iMediaTime;
LOG1(EJavaMMAPI, EInfo, "CMMAMMFPlayerBase::GetMediaTime - %d", *aMediaTime);
}
示例6: GetDuration
// Reports the current seek window and playback rate. Any output pointer
// may be NULL when the caller does not need that value.
void CAVISplitter::GetSeekingParams(REFERENCE_TIME* ptStart, REFERENCE_TIME* ptStop, double* pdRate)
{
    if (ptStart)
        *ptStart = m_tStart;

    if (ptStop)
    {
        // A stop time of MAX_TIME means "play to the end": lazily resolve
        // it to the real media duration the first time it is requested.
        if (m_tStop == MAX_TIME)
            m_tStop = GetDuration();
        *ptStop = m_tStop;
    }

    if (pdRate)
        *pdRate = m_dRate;
}
示例7: lock
// Repositions the splitter to [tStart, tStop] at the given rate,
// pausing the pushing thread around the state change.
HRESULT CAVISplitter::Seek(REFERENCE_TIME tStart, REFERENCE_TIME tStop, double dRate)
{
    // We must stop the pushing thread before we change
    // the seek parameters -- especially for VBR seeking,
    // where we might re-seek based on the incoming data.
    CAutoLock lock(&m_csSeeking);

    // Treat a zero or inverted stop time as "to the end of the media".
    if ((tStop == 0) || (tStop < tStart))
        tStop = GetDuration();

    const bool bNeedRestart = Suspend();

    m_tStart = tStart;
    m_tStop = tStop;
    m_dRate = dRate;
    m_scanner.Seek(m_tStart);

    if (bNeedRestart)
        Resume();

    return S_OK;
}
示例8: GetDuration
// Seeks the GStreamer pipeline to a position given as a percentage
// (0-100) of the stream duration. Fails when there is no pipeline or
// the duration is unknown/zero.
STDMETHODIMP IKGSTAudioPlayer::SetPosition(STREAM_TIME pos)//0-100
{
    if (!pipeline)
        return E_FAIL;

    GstElement *element = pipeline;
    gint64 Gstart = 0;
    gint64 Glength = GetDuration();
    if (!Glength)
        return E_FAIL;

    // Scale the percentage into stream time (integer *= double keeps the
    // original truncating conversion) and offset by the segment start.
    gint64 target = Glength;
    target *= (double)pos / 100;
    target += Gstart;

    // Flushing seek from 'target' to the end of the segment.
    bool ok = gst_element_seek(element, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
                               GST_SEEK_TYPE_SET, target,
                               GST_SEEK_TYPE_SET, Gstart + Glength);
    return ok ? S_OK : E_FAIL;
}
示例9: UpdateRspBuf
//=======================================================
// FUNCTIONS: [email protected]
// PURPOSE: Update All Kinds Of Buffers
//=======================================================
//=======================================================
// FUNCTIONS: [email protected]
// PURPOSE: Update All Kinds Of Buffers
//=======================================================
// Transmits the pending response frame (CTS or ACK) from the node's
// response buffer. Returns -1 when there is nothing to send or a
// response is already in flight, 0 otherwise. Aborts on an unknown
// frame subtype.
int UpdateRspBuf(MobileNode *node) {
	WGN_802_11_Mac_Frame *frame;
	unsigned int timeout = 0;

	if (PktRspIsEmpty(node) == TRUE)
		return -1;
	// A CTS/ACK transmission is already in progress.
	if (GetTxStatus(node) == MAC_CTS || GetTxStatus(node) == MAC_ACK)
		return -1;

	frame = node->nodeMac->pktRsp;

	switch (GetFcSubtype(frame)) {
	case MAC_SUBTYPE_CTS:
		// A CTS may only be sent on an idle channel; otherwise drop it.
		if (CheckIfChannelIdle(node) == FALSE) {
			Dot11FrameFree(frame);
			node->nodeMac->pktRsp = NULL;
			return 0;
		}
		SetTxStatus(node, MAC_CTS);
		// Timeout covers the NAV from the frame plus the CTS airtime.
		timeout = UsecToNsec(GetDuration(frame)) + SecToNsec(CTS_Time);
		break;

	// IEEE 802.11 specs, section 9.2.8:
	// Acknowledgement should be sent after an SIFS, regardless of
	// the busy/idle state of the medium.
	case MAC_SUBTYPE_ACK:
		SetTxStatus(node, MAC_ACK);
		timeout = SecToNsec(ACK_Time);
		break;

	default:
		printf ("[UpdateRspBuf]:: Error, Invalid frame subtype!\n");
		exit(1);
		break;
	}

	MacSendPkt(node, frame, timeout);
	return 0;
}
示例10: CreateSourceReaderAttribute
// Opens a media file with a Media Foundation source reader, caches the
// duration/seek flags, probes the first video stream for frame size,
// key-frame delta and frame rate, then configures the video and audio
// decoders.
// Returns the HRESULT of the last configuration step (early-returns on
// reader-creation or duration failures via PKY_IF_ERR_RETURN).
HRESULT MFMovieSource::Open( const wchar_t* pszFilePath )
{
	HRESULT hr;
	hr=MFCreateSourceReaderFromURL( pszFilePath, CreateSourceReaderAttribute(), &_reader );
	PKY_IF_ERR_RETURN( hr, "Create SourceReader Failed" );

	hr=GetDuration( _reader, m_Duration );
	PKY_IF_ERR_RETURN( hr, "GetDuration Failed" );

	hr=GetSourceFlags( _reader, &m_SeekingFlag );

	// Probe the native video media types to learn size / key-frame
	// spacing / frame rate; returning true keeps the enumeration going.
	hr=EnumerateTypsForStream( _reader,(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, [this]( IMFMediaType*pType )->bool {
		UINT32 wd;
		UINT32 ht;
		if( SUCCEEDED( GetFrameSize( pType, &wd, &ht ) ) )
		{
			this->_wd=wd;
			this->_ht=ht;
		}
		if( SUCCEEDED( GetKeyFrameDelta( pType, &this->_keyFrameDelta ) ) )
		{
		}
		uint32_t numer, denum;
		if( SUCCEEDED(GetFrameRate(pType, numer, denum)) )
		{
			this->_numer = numer;
			this->_denum = denum;
		}
		return true;
	} );

	// m_Duration is presumably in 100-ns units (hence the 1e7 divisor) —
	// TODO confirm against GetDuration's contract.
	_frameCount = (UINT64)(m_Duration * _numer/_denum/10000000.0 + 0.001);

	hr=ConfigureDecoder( _reader, (DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM );
	hr=ConfigureDecoder( _reader, (DWORD)MF_SOURCE_READER_FIRST_AUDIO_STREAM );
	//Jump(0);

	// BUG FIX: the original unconditionally returned E_NOTIMPL, so every
	// caller saw a successful Open() as a failure. Propagate the last
	// HRESULT instead.
	return hr;
}
示例11: STDMETHODIMP_
// Returns playback progress as a percentage (0-100) of the stream
// duration, or 0 when there is no pipeline, no known duration, or the
// position query fails.
STDMETHODIMP_(STREAM_TIME) IKGSTAudioPlayer::GetPosition()//0-100
{
    if (!pipeline)
        return 0;

    GstElement *element = pipeline;
    gint64 Gstart = 0;
    gint64 Glength = GetDuration();
    gint64 current;
    GstFormat fmt = GST_FORMAT_TIME;

    // Short-circuit keeps the query from running when the duration is 0.
    if (!Glength || !gst_element_query_position(element, &fmt, &current))
        return 0;

    // Scale (position - start) into a 0-100 integer percentage.
    current -= Gstart;
    current *= 100;
    return current / Glength;
}
示例12: TEXT
// Builds the editor-facing description string for this sound cue:
// "<duration>s [<sound class>]" or "Forever [<sound class>]" for
// indefinitely looping cues.
FString USoundCue::GetDesc()
{
	FString Description = TEXT( "" );

	// Display duration
	const float Duration = GetDuration();
	if( Duration < INDEFINITELY_LOOPING_DURATION )
	{
		Description = FString::Printf( TEXT( "%3.2fs" ), Duration );
	}
	else
	{
		Description = TEXT( "Forever" );
	}

	// Display group.
	// BUG FIX: the original dereferenced GetSoundClass() unconditionally
	// and would crash on a cue with no sound class assigned; guard the
	// null case (emits empty brackets "[]" instead).
	Description += TEXT( " [" );
	if( USoundClass* SoundClass = GetSoundClass() )
	{
		Description += *SoundClass->GetName();
	}
	Description += TEXT( "]" );

	return Description;
}
示例13: GetCurrentThreadId
//******************************************************
/// Called when thread is about to start delivering data to the codec
///
HRESULT CVideoPin::OnThreadStartPlay()
{
// Called when the streaming thread is about to start delivering data to
// the codec: resets per-run delivery state, refreshes the duration,
// optionally flushes downstream, and opens a new segment.
DWORD thrdID = GetCurrentThreadId();
LogDebug("vidPin:OnThreadStartPlay(%f), rate:%02.2f, threadID:0x%x, GET_TIME_NOW:0x%x", (float)m_rtStart.Millisecs()/1000.0f, m_dRateSeeking, thrdID, GET_TIME_NOW());
//set discontinuity flag indicating to codec that the new data
//is not belonging to any previous data
m_bDiscontinuity=TRUE;
m_bPresentSample=false;
m_delayedDiscont = 0;
m_FillBuffSleepTime = 1;
m_LastFillBuffTime = GET_TIME_NOW();
m_sampleCount = 0;
m_bInFillBuffer=false;
m_pTsReaderFilter->m_ShowBufferVideo = INIT_SHOWBUFFERVIDEO;
// Reset the media-timestamp drift/compensation tracking state
// (MTD = media time delta; averaged over the NB_MTDSIZE-entry ring).
m_llLastComp = 0;
m_llLastMTDts = 0;
m_nNextMTD = 0;
m_fMTDMean = 0;
m_llMTDSumAvg = 0;
ZeroMemory((void*)&m_pllMTD, sizeof(REFERENCE_TIME) * NB_MTDSIZE);
//get file-duration and set m_rtDuration
// (called for its side effect only; the NULL arg discards the out value)
GetDuration(NULL);
if( !m_bPinNoNewSegFlush ) //MS DTV video decoder can hang if we flush here...
{
//Downstream flush
DeliverBeginFlush();
DeliverEndFlush();
}
//start playing
// Announce the new segment to downstream, then defer to the base class.
DeliverNewSegment(m_rtStart, m_rtStop, m_dRateSeeking);
return CSourceStream::OnThreadStartPlay( );
}
示例14: Validate
// Checks that the elapsed time still falls on the path and, if so,
// updates the current-node bookkeeping (m_CurrentNode, m_PreviousNodeTime,
// m_PreviousRotation). Returns false when the elapsed time is out of
// range or no node segment contains it.
bool CCinemaPath::Validate()
{
	// Guard: with no nodes, "Node.size() - 1" below underflows size_t and
	// the loop would read far out of bounds.
	if ( Node.empty() )
		return false;

	if ( m_TimeElapsed <= GetDuration() && m_TimeElapsed >= 0.0f )
	{
		//Find current node and past "node time"
		float previousTime = 0.0f, cumulation = 0.0f;
		//Ignore the last node, since it is a blank (node time values are shifted down one from interface)
		for ( size_t i = 0; i < Node.size() - 1; ++i )
		{
			cumulation += Node[i].Distance;
			if ( m_TimeElapsed <= cumulation )
			{
				m_PreviousNodeTime = previousTime;
				m_PreviousRotation = Node[i].Rotation;
				m_CurrentNode = i;	//We're moving toward this next node, so use its rotation
				return true;
			}
			else
				previousTime += Node[i].Distance;
		}
	}
	return false;
}
示例15: GetInterpolate
/*--------------------------------------------------------------------------------*/
bool AudioObjectParameters::GetJumpPosition(bool& jumpPosition, double *interpolationLength) const
{
bool interpolate = false;
bool valid = GetInterpolate(interpolate);
if (valid)
{
jumpPosition = (!interpolate || (interpolate && (GetInterpolationTime() != GetDuration())));
if (interpolationLength) *interpolationLength = interpolate ? GetInterpolationTimeS() : 0.0;
}
else
{
jumpPosition = false;
if (interpolationLength) *interpolationLength = 0.0;
}
BBCDEBUG3(("GetJumpPosition(): %s/%s -> %s/%s",
StringFrom(GetInterpolate()).c_str(),
StringFrom(GetInterpolationTimeS()).c_str(),
StringFrom(jumpPosition).c_str(),
interpolationLength ? StringFrom(*interpolationLength).c_str() : "<notset>"));
return valid;
}