本文整理汇总了C++中BBuffer::Header方法的典型用法代码示例。如果您正苦于以下问题:C++ BBuffer::Header方法的具体用法?C++ BBuffer::Header怎么用?C++ BBuffer::Header使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类BBuffer
的用法示例。
在下文中一共展示了BBuffer::Header方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: if
void
ProducerNode::BufferProducer()
{
	// This thread produces one buffer each DELAY microseconds and
	// schedules it to be handled DELAY/2 later than produced,
	// assuming a realtime timesource.
	status_t rv;
	for (;;) {
		rv = acquire_sem_etc(mBufferProducerSem, 1, B_RELATIVE_TIMEOUT, DELAY);
		if (rv == B_INTERRUPTED) {
			continue;
		} else if (rv == B_OK) {
			// triggered by AdditionalBufferRequested
			release_sem(mBufferProducerSem);
		} else if (rv != B_TIMED_OUT) {
			// triggered by deleting the semaphore (stop request)
			break;
		}
		if (!mOutputEnabled)
			continue;

		// out("ProducerNode: RequestBuffer\n");
		BBuffer* buffer = mBufferGroup->RequestBuffer(2048);
		if (!buffer) {
			// Buffer group exhausted/reclaimed: skip this cycle. The
			// original fell through and dereferenced the NULL buffer.
			continue;
		}
		buffer->Header()->start_time = TimeSource()->Now() + DELAY / 2;
		out("ProducerNode: SendBuffer, sheduled time = %5.4f\n",
			buffer->Header()->start_time / 1E6);
		rv = SendBuffer(buffer, mOutput.destination);
		if (rv != B_OK) {
			// SendBuffer failed, so ownership was NOT transferred
			// downstream; recycle or the group leaks this buffer.
			buffer->Recycle();
		}
	}
}
示例2: calcProcessingLatency
// Figure processing latency by doing a 'dry run' of filterBuffer()
// on a scratch buffer; returns the elapsed wall-clock time.
bigtime_t StepMotionBlurFilter::calcProcessingLatency() {
	PRINT(("StepMotionBlurFilter::calcProcessingLatency()\n"));

	if (m_output.destination == media_destination::null) {
		PRINT(("\tNot connected.\n"));
		return 0LL;
	}

	// One full frame of raw video at 4 bytes per pixel — computed once
	// instead of three times as in the original.
	const size_t frameSize = m_output.format.u.raw_video.display.line_width
		* m_output.format.u.raw_video.display.line_count * 4;

	// allocate a temporary buffer group and fetch a test buffer
	BBufferGroup* pTestGroup = new BBufferGroup(frameSize, 1);
	BBuffer* pBuffer = pTestGroup->RequestBuffer(frameSize);
	ASSERT(pBuffer);
	if (!pBuffer) {
		// ASSERT compiles out in release builds; avoid a NULL dereference
		// when the group could not satisfy the request.
		delete pTestGroup;
		return 0LL;
	}
	pBuffer->Header()->type = B_MEDIA_RAW_VIDEO;
	pBuffer->Header()->size_used = frameSize;

	// run the test
	bigtime_t preTest = system_time();
	filterBuffer(pBuffer);
	bigtime_t elapsed = system_time() - preTest;

	// clean up
	pBuffer->Recycle();
	delete pTestGroup;

	// reset filter state disturbed by the dry run
	initFilter();
	return elapsed;
}
示例3: BBufferGroup
bigtime_t
EqualizerNode::GetFilterLatency(void)
{
	// Measure how long one dry run of FilterBuffer() takes on a
	// throw-away buffer; used as this node's processing latency.
	if (fOutputMedia.destination == media_destination::null)
		return 0LL;

	BBufferGroup* test_group =
		new BBufferGroup(fOutputMedia.format.u.raw_audio.buffer_size, 1);
	BBuffer* buffer =
		test_group->RequestBuffer(fOutputMedia.format.u.raw_audio.buffer_size);
	if (buffer == NULL) {
		// RequestBuffer can fail; the original dereferenced NULL here.
		delete test_group;
		return 0LL;
	}
	buffer->Header()->type = B_MEDIA_RAW_AUDIO;
	buffer->Header()->size_used = fOutputMedia.format.u.raw_audio.buffer_size;

	bigtime_t begin = system_time();
	FilterBuffer(buffer);
	bigtime_t latency = system_time() - begin;

	buffer->Recycle();
	delete test_group;

	// restore filter state disturbed by the dry run
	InitFilter();
	return latency;
}
示例4: calcProcessingLatency
// Figure processing latency by doing a 'dry run' of filterBuffer()
// on a scratch buffer; returns the elapsed wall-clock time.
bigtime_t FlangerNode::calcProcessingLatency() {
	PRINT(("FlangerNode::calcProcessingLatency()\n"));

	if (m_output.destination == media_destination::null) {
		PRINT(("\tNot connected.\n"));
		return 0LL;
	}

	// hoist the repeated format lookup
	const size_t bufferSize = m_output.format.u.raw_audio.buffer_size;

	// allocate a temporary buffer group and fetch a test buffer
	BBufferGroup* pTestGroup = new BBufferGroup(bufferSize, 1);
	BBuffer* pBuffer = pTestGroup->RequestBuffer(bufferSize);
	ASSERT(pBuffer);
	if (!pBuffer) {
		// ASSERT is a no-op in release builds; guard the NULL dereference
		// when the group could not satisfy the request.
		delete pTestGroup;
		return 0LL;
	}
	pBuffer->Header()->type = B_MEDIA_RAW_AUDIO;
	pBuffer->Header()->size_used = bufferSize;

	// run the test
	bigtime_t preTest = system_time();
	filterBuffer(pBuffer);
	bigtime_t elapsed = system_time() - preTest;

	// clean up
	pBuffer->Recycle();
	delete pTestGroup;

	// reset filter state disturbed by the dry run
	initFilter();
	return elapsed;
}
示例5: BufferDuration
BBuffer*
SoundPlayNode::FillNextBuffer(bigtime_t eventTime)
{
	CALLED();

	const size_t byteCount = fOutput.format.u.raw_audio.buffer_size;

	// Obtain a buffer from our group, waiting at most half a buffer
	// duration so a failed request cannot lock up the control thread.
	BBuffer* outBuffer = fBufferGroup->RequestBuffer(byteCount,
		BufferDuration() / 2);
	if (outBuffer == NULL) {
		// Skip this buffer and go on to the next one.
		ERROR("SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

	// Fill with player data when available, silence otherwise.
	if (fPlayer->HasData()) {
		fPlayer->PlayBuffer(outBuffer->Data(), byteCount,
			fOutput.format.u.raw_audio);
	} else
		memset(outBuffer->Data(), 0, byteCount);

	// Describe the buffer for the downstream consumer.
	media_header* outHeader = outBuffer->Header();
	outHeader->type = B_MEDIA_RAW_AUDIO;
	outHeader->size_used = byteCount;
	outHeader->time_source = TimeSource()->ID();
	outHeader->start_time = eventTime;

	return outBuffer;
}
示例6: HandleBuffer
// how should we handle late buffers? drop them?
// notify the producer?
status_t ESDSinkNode::HandleBuffer(
	const media_timed_event *event,
	bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	BBuffer * buffer = const_cast<BBuffer*>((BBuffer*)event->pointer);
	if (buffer == 0) {
		fprintf(stderr,"<- B_BAD_VALUE\n");
		return B_BAD_VALUE;
	}

	// fetch the header once; the original called Header() twice
	media_header* hdr = buffer->Header();
	if (fInput.destination.id != hdr->destination) {
		fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n");
		return B_MEDIA_BAD_DESTINATION;
	}

	bigtime_t now = TimeSource()->Now();
	bigtime_t perf_time = hdr->start_time;
	// the how_early calculated here doesn't include scheduling latency
	// because we've already been scheduled to handle the buffer
	bigtime_t how_early = perf_time - EventLatency() - now;

	// if the buffer is late, we ignore it and report the fact to the
	// producer who sent it to us
	if ((RunMode() != B_OFFLINE) && // lateness doesn't matter in offline mode...
		(RunMode() != B_RECORDING) && // ...or in recording mode
		(how_early < 0LL))
	{
		//mLateBuffers++;
		NotifyLateProducer(fInput.source, -how_early, perf_time);
		fprintf(stderr," <- LATE BUFFER : %lli\n", how_early);
		buffer->Recycle();
	} else {
		if (fDevice->CanSend())
			fDevice->Write(buffer->Data(), buffer->SizeUsed());
		// We own the buffer once it is delivered to us; recycle it after
		// playback — the original leaked every on-time buffer, eventually
		// starving the producer's buffer group.
		buffer->Recycle();
	}
	return B_OK;
}
示例7: BBufferGroup
void
FireWireDVNode::card_reader_thread()
{
	// Reader loop: pulls raw DV data from the FireWire card and pushes it
	// downstream as encoded-video buffers until fTerminateThreads is set.
	status_t err;
	size_t rbufsize;
	int rcount;
	// size the buffer group to match the card's own buffer geometry
	fCard->GetBufInfo(&rbufsize, &rcount);
	delete fBufferGroupEncVideo;
	fBufferGroupEncVideo = new BBufferGroup(rbufsize, rcount);
	while (!fTerminateThreads) {
		void *data, *end;
		// read one chunk from the card; negative result is an error code
		ssize_t sizeUsed = fCard->Read(&data);
		if (sizeUsed < 0) {
			TRACE("FireWireDVNode::%s: %s\n", __FUNCTION__,
				strerror(sizeUsed));
			continue;
		}
		end = (char*)data + sizeUsed;
		// the chunk may hold several frames; Extract() presumably
		// advances 'data' toward 'end' — TODO confirm against fCard API
		while (data < end) {
			BBuffer* buf = fBufferGroupEncVideo->RequestBuffer(rbufsize, 10000);
			if (!buf) {
				// NOTE(review): 'data' is not advanced here, so a
				// persistently exhausted group retries the same position
				// forever — confirm this cannot spin
				TRACE("OutVideo: request buffer timout\n");
				continue;
			}
			err = fCard->Extract(buf->Data(), &data, &sizeUsed);
			if (err) {
				buf->Recycle();
				printf("OutVideo Extract error %s\n", strerror(err));
				continue;
			}
			// describe the buffer before sending it downstream
			media_header* hdr = buf->Header();
			hdr->type = B_MEDIA_ENCODED_VIDEO;
			hdr->size_used = sizeUsed;
			hdr->time_source = TimeSource()->ID(); // set time source id
			//what should the start_time be?
			hdr->start_time = TimeSource()->PerformanceTimeFor(system_time());
			fLock.Lock();
			if (SendBuffer(buf, fOutputEncVideo.source,
				fOutputEncVideo.destination) != B_OK) {
				// SendBuffer failed: ownership was not transferred, so
				// the buffer must be reclaimed here
				TRACE("OutVideo: sending buffer failed\n");
				buf->Recycle();
			}
			fLock.Unlock();
		}
	}
}
示例8: BufferDuration
BBuffer*
GameProducer::FillNextBuffer(bigtime_t event_time)
{
	// Pull a buffer from the group; bail out rather than lock up the
	// control thread if none becomes available in time.
	BBuffer* audioBuffer = fBufferGroup->RequestBuffer(fBufferSize,
		BufferDuration());
	if (audioBuffer == NULL)
		return NULL;

	// Describe and fill the buffer, continuing where the last one left off.
	const int64 frameCount = int64(fBufferSize / fFrameSize);
	memset(audioBuffer->Data(), 0, fBufferSize);
	fObject->Play(audioBuffer->Data(), frameCount);

	// fill in the buffer header
	media_header* header = audioBuffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = fBufferSize;
	header->time_source = TimeSource()->ID();

	bigtime_t performanceTime;
	if (RunMode() == B_RECORDING) {
		// Recording mode: stamp with the (simulated) capture time, i.e.
		// the precalculated time this buffer "should" have been created.
		performanceTime = event_time;
	} else {
		// Live modes: stamp with the render time instead — the cached
		// first-buffer performance time (fStartTime) plus an offset for
		// the media produced so far. Recomputing from fStartTime for
		// every buffer avoids accumulation of error.
		performanceTime = fStartTime + bigtime_t(double(fFramesSent)
			/ double(fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	header->start_time = performanceTime;

	return audioBuffer;
}
示例9: calcProcessingLatency
// Figure processing latency by timing one 'dry run' of processBuffer()
// in place on a scratch buffer.
bigtime_t AudioFilterNode::calcProcessingLatency() {
	PRINT(("AudioFilterNode::calcProcessingLatency()\n"));
	ASSERT(m_input.source != media_source::null);
	ASSERT(m_output.destination != media_destination::null);
	ASSERT(m_op);

	// initialize filter
	m_op->init();

	// a buffer large enough for in-place processing of either format
	size_t bufferCapacity = max_c(
		m_input.format.u.raw_audio.buffer_size,
		m_output.format.u.raw_audio.buffer_size);

	// temporary group plus one test buffer
	BBufferGroup* scratchGroup = new BBufferGroup(bufferCapacity, 1);
	BBuffer* scratchBuffer = scratchGroup->RequestBuffer(bufferCapacity, -1);
	ASSERT(scratchBuffer);
	scratchBuffer->Header()->type = B_MEDIA_RAW_AUDIO;
	scratchBuffer->Header()->size_used = m_input.format.u.raw_audio.buffer_size;

	// time a single in-place pass
	bigtime_t startedAt = system_time();
	processBuffer(scratchBuffer, scratchBuffer);
	bigtime_t spent = system_time() - startedAt;

	// clean up
	scratchBuffer->Recycle();
	delete scratchGroup;

	// reset filter state
	m_op->init();

	return spent;// + 100000LL;
}
示例10: TimeSource
BBuffer*
ClientNode::FillNextBuffer(bigtime_t eventTime, JackPort* port)
{
	//printf("FillNextBuffer\n");
	// Stamp the port's current buffer with a raw-audio media header.
	BBuffer* current = port->CurrentBuffer();
	media_header* hdr = current->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = fFormat.u.raw_audio.buffer_size;
	hdr->time_source = TimeSource()->ID();

	bigtime_t performanceTime;
	if (RunMode() == B_RECORDING) {
		// recording: stamp with the capture time
		performanceTime = eventTime;
	} else {
		// live: stamp with the render time — offset from the stream
		// start by the amount of media produced so far
		performanceTime = fTime + bigtime_t(double(fFramesSent)
			/ double(fFormat.u.raw_audio.frame_rate) * 1000000.0);
	}
	hdr->start_time = performanceTime;

	return current;
}
示例11: BufferDuration
BBuffer*
ToneProducer::FillNextBuffer(bigtime_t event_time)
{
	// Request a buffer from our group; give up after one buffer duration
	// so a failed request cannot lock up the control thread.
	BBuffer* buf = mBufferGroup->RequestBuffer(
		mOutput.format.u.raw_audio.buffer_size, BufferDuration());
	if (buf == NULL)
		return NULL;

	// 20sep99: multichannel support
	size_t numFrames = mOutput.format.u.raw_audio.buffer_size
		/ (sizeof(float) * mOutput.format.u.raw_audio.channel_count);
	bool stereo = (mOutput.format.u.raw_audio.channel_count == 2);
	if (!stereo)
		ASSERT(mOutput.format.u.raw_audio.channel_count == 1);
	// PRINT(("buffer: %ld, %ld frames, %s\n", mOutput.format.u.raw_audio.buffer_size, numFrames, stereo ? "stereo" : "mono"));

	// Generate the configured waveform, continuing where the previous
	// buffer left off.
	float* samples = (float*)buf->Data();
	switch (mWaveform) {
		case SINE_WAVE:
			FillSineBuffer(samples, numFrames, stereo);
			break;
		case TRIANGLE_WAVE:
			FillTriangleBuffer(samples, numFrames, stereo);
			break;
		case SAWTOOTH_WAVE:
			FillSawtoothBuffer(samples, numFrames, stereo);
			break;
	}

	// fill in the buffer header
	media_header* hdr = buf->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = mOutput.format.u.raw_audio.buffer_size;
	hdr->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// B_RECORDING: stamp with the (simulated) capture time — the
		// precalculated time at which this buffer "should" have been
		// created.
		stamp = event_time;
	} else {
		// Live performance modes: stamp with the time at which the buffer
		// should be rendered to the output, not the capture time.
		// mStartTime is the cached first-buffer performance time; this
		// buffer's time is an offset from it based on the media created
		// so far. Recalculating every buffer avoids accumulated error.
		stamp = mStartTime + bigtime_t(double(mFramesSent)
			/ double(mOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	hdr->start_time = stamp;

	return buf;
}
示例12: BufferReceived
void AudioFilterNode::BufferReceived(
	BBuffer* buffer) {
	ASSERT(buffer);

	// check buffer destination
	if(buffer->Header()->destination !=
		m_input.destination.id) {
		PRINT(("AudioFilterNode::BufferReceived():\n"
			"\tBad destination.\n"));
		buffer->Recycle();
		return;
	}

	if(buffer->Header()->time_source != TimeSource()->ID()) { // +++++ no-go in offline mode
		PRINT(("* timesource mismatch\n"));
	}

	// drop the buffer if we're not connected or the output is disabled
	if(m_output.destination == media_destination::null ||
		!m_outputEnabled) {
		buffer->Recycle();
		return;
	}

	// fetch outbound buffer if needed
	BBuffer* outBuffer;
	if(m_bufferGroup) {
		outBuffer = m_bufferGroup->RequestBuffer(
			m_output.format.u.raw_audio.buffer_size, -1);
		if (outBuffer == NULL) {
			// RequestBuffer can fail; the original only ASSERTed, which
			// dereferences NULL in release builds and leaks the inbound
			// buffer. Drop this buffer instead.
			PRINT(("AudioFilterNode::BufferReceived():\n"
				"\tRequestBuffer() failed.\n"));
			buffer->Recycle();
			return;
		}
		// prepare outbound buffer
		outBuffer->Header()->type = B_MEDIA_RAW_AUDIO;

		// copy start time info from upstream node
		// +++++ is this proper, or should the next buffer-start be
		// continuously tracked (figured from Start() or the first
		// buffer received?)
		outBuffer->Header()->time_source = buffer->Header()->time_source;
		outBuffer->Header()->start_time = buffer->Header()->start_time;
	}
	else {
		// process inplace
		outBuffer = buffer;
	}

	// process and retransmit buffer
	processBuffer(buffer, outBuffer);
	status_t err = SendBuffer(outBuffer, m_output.source, m_output.destination);
	if (err < B_OK) {
		PRINT(("AudioFilterNode::BufferReceived():\n"
			"\tSendBuffer() failed: %s\n", strerror(err)));
		outBuffer->Recycle();
	}

	// free inbound buffer if data was copied
	if(buffer != outBuffer)
		buffer->Recycle();
}
示例13: _
//.........这里部分代码省略.........
case B_TIMED_OUT:
TRACE("_FrameGeneratorThread: timed out => event\n");
// Catch the cases in which the node manager could not be
// locked and we therefore have no valid data to work with,
// or the producer is not running or enabled.
if (ignoreEvent || !fRunning || !fEnabled) {
TRACE("_FrameGeneratorThread: ignore event\n");
// nothing to do
} else if (!forceSendingBuffer
&& nextWaitUntil < system_time() - fBufferLatency
&& droppedFrames < kMaxDroppedFrames) {
// Drop frame if it's at least a frame late.
if (playingDirection > 0)
printf("VideoProducer: dropped frame (%Ld)\n", fFrame);
// next frame
droppedFrames++;
fFrame++;
} else if (playingDirection != 0 || forceSendingBuffer) {
// Send buffers only, if playing, the node is running and
// the output has been enabled
TRACE("_FrameGeneratorThread: produce frame\n");
BAutolock _(fLock);
// Fetch a buffer from the buffer group
fUsedBufferGroup->WaitForBuffers();
BBuffer* buffer = fUsedBufferGroup->RequestBuffer(
fConnectedFormat.display.bytes_per_row
* fConnectedFormat.display.line_count, 0LL);
if (buffer == NULL) {
// Wait until a buffer becomes available again
ERROR("_FrameGeneratorThread: no buffer!\n");
break;
}
// Fill out the details about this buffer.
media_header* h = buffer->Header();
h->type = B_MEDIA_RAW_VIDEO;
h->time_source = TimeSource()->ID();
h->size_used = fConnectedFormat.display.bytes_per_row
* fConnectedFormat.display.line_count;
// For a buffer originating from a device, you might
// want to calculate this based on the
// PerformanceTimeFor the time your buffer arrived at
// the hardware (plus any applicable adjustments).
h->start_time = fPerformanceTimeBase + performanceTime;
h->file_pos = 0;
h->orig_size = 0;
h->data_offset = 0;
h->u.raw_video.field_gamma = 1.0;
h->u.raw_video.field_sequence = fFrame;
h->u.raw_video.field_number = 0;
h->u.raw_video.pulldown_number = 0;
h->u.raw_video.first_active_line = 1;
h->u.raw_video.line_count
= fConnectedFormat.display.line_count;
// Fill in a frame
TRACE("_FrameGeneratorThread: frame: %Ld, "
"playlistFrame: %Ld\n", fFrame, playlistFrame);
bool wasCached = false;
err = fSupplier->FillBuffer(playlistFrame,
buffer->Data(), fConnectedFormat, forceSendingBuffer,
wasCached);
if (err == B_TIMED_OUT) {
// Don't send the buffer if there was insufficient
// time for rendering, this will leave the last
// valid frame on screen until we catch up, instead
// of going black.
wasCached = true;
示例14: TimeSource
void
LoggingConsumer::HandleEvent(const media_timed_event *event, bigtime_t /* lateness */, bool /* realTimeEvent */)
{
log_message logMsg;
logMsg.now = TimeSource()->Now();
mLogger->Log(LOG_HANDLE_EVENT, logMsg);
switch (event->type)
{
case BTimedEventQueue::B_HANDLE_BUFFER:
{
BBuffer* buffer = const_cast<BBuffer*>((BBuffer*) event->pointer);
if (buffer)
{
media_header* hdr = buffer->Header();
if (hdr->destination == mInput.destination.id)
{
bigtime_t now = TimeSource()->Now();
bigtime_t perf_time = hdr->start_time;
// the how_early calculated here doesn't include scheduling latency because
// we've already been scheduled to handle the buffer
bigtime_t how_early = perf_time - mLatency - now;
// logMsg.now is already set
logMsg.buffer_data.start_time = perf_time;
logMsg.buffer_data.offset = how_early;
mLogger->Log(LOG_BUFFER_HANDLED, logMsg);
// if the buffer is late, we ignore it and report the fact to the producer
// who sent it to us
if (how_early < 0)
{
mLateBuffers++;
NotifyLateProducer(mInput.source, -how_early, perf_time);
}
else
{
// burn some percentage of our stated latency in CPU time (controlled by
// a BParameter). this simulates a user-configurable amount of CPU cost
// associated with the consumer.
bigtime_t spin_start = ::system_time();
bigtime_t spin_now = spin_start;
bigtime_t usecToSpin = bigtime_t(mSpinPercentage / 100.0 * mLatency);
while (spin_now - spin_start < usecToSpin)
{
for (long k = 0; k < 1000000; k++) { /* intentionally blank */ }
spin_now = ::system_time();
}
}
// we're done "processing the buffer;" now we recycle it and return to the loop
buffer->Recycle();
}
else
{
//fprintf(stderr, "* Woah! Got a buffer for a different destination!\n");
}
}
}
break;
// !!! change to B_PARAMETER as soon as it's available
// +++++ e.moon [16jun99]
// !!! this can't be right: the parameter value is accessed by the pointer
// originally passed to SetParameterValue(). there's no guarantee that
// value's still valid, is there?
case BTimedEventQueue::B_USER_EVENT:
{
size_t dataSize = size_t(event->data);
int32 param = int32(event->bigdata);
logMsg.param.id = param;
// handle the message if there's sufficient data provided. we only check against
// sizeof(float) because all of our parameters happen to be 4 bytes. if various
// parameters took different amounts of data, we'd check the size on a per-parameter
// basis.
if (dataSize >= sizeof(float)) switch (param)
{
case LATENCY_PARAM:
{
float value = *((float*) event->pointer);
mLatency = bigtime_t(value* 1000);
mLastLatencyChange = logMsg.now;
// my latency just changed, so reconfigure the BMediaEventLooper
// to give me my events at the proper time
SetEventLatency(mLatency);
// tell the producer that my latency changed, and broadcast a message
// about the parameter change to any applications that may be looking
// for it through the BMediaRoster::StartWatching() mechanism.
//
// if we had more than one input, we'd need to tell *all* producers about
// the change in our latency.
SendLatencyChange(mInput.source, mInput.destination, EventLatency() + SchedulingLatency());
BroadcastNewParameterValue(logMsg.now, param, &value, sizeof(value));
//.........这里部分代码省略.........
示例15: TimeSource
void
TVideoPreviewView::DisplayThread()
{
FUNCTION("TVideoPreviewView::DisplayThread\n");
bigtime_t timeout = 5000;
bigtime_t realTimeNow = 0;
bigtime_t perfTimeNow = 0;
bigtime_t halfPeriod = (bigtime_t) (500000./29.97);
bool timeSourceRunning = false;
while (!mDisplayQuit) {
if (acquire_sem(mServiceLock) == B_NO_ERROR) {
timeSourceRunning = TimeSource()->IsRunning();
realTimeNow = BTimeSource::RealTime();
perfTimeNow = TimeSource()->Now();
release_sem(mServiceLock);
}
snooze(timeout);
if (timeSourceRunning) {
// if we received a Stop, deal with it
if (mStopping) {
PROGRESS("VidConsumer::DisplayThread - STOP\n");
if (perfTimeNow >= mStopTime) {
mRunning = false;
mStopping = false;
// deal with any pending Seek
if (mSeeking)
mSeeking = false;
//if (mConnected)
// SendDataStatus(B_DATA_NOT_AVAILABLE, mConnections[0], mStopTime);
continue;
}
}
// if we received a Seek, deal with it
if (mSeeking) {
PROGRESS("VidConsumer::DisplayThread - SEEK\n");
if (perfTimeNow >= mSeekTime) {
PROGRESS("VidConsumer::DisplayThread - DO SEEK\n");
mSeeking = false;
mDeltaTime = mMediaTime;
continue;
}
}
// if we received a Start, deal with it
if (mStarting) {
PROGRESS("BBt848Controllable::CaptureRun mStartTime = %.4f TimeNow = %.4f\n", (double)mStartTime/M1, (double)perfTimeNow/M1);
if (perfTimeNow >= mStartTime) {
mRunning = true;
mStarting = false;
mDeltaTime = mStartTime;
//if (mConnected)
// SendDataStatus(B_DATA_AVAILABLE, mConnections[0], mStartTime);
continue;
}
}
if (mRunning) {
// check for buffer available.
status_t err = acquire_sem_etc(mBufferAvailable, 1, B_TIMEOUT, halfPeriod * 2);
if (err == B_TIMED_OUT || !mConnected) {
ERROR("VidConsumer::DisplayThread - Error from acquire_sem_etc: 0x%lx\n", err);
continue;
}
BBuffer* buffer = mBufferQueue->PopFirstBuffer(0);
LOOP("Popped buffer %08x, Start time: %.4f, system time: %.4f diff: %.4f\n",
buffer,
(double) buffer->Header()->start_time/M1,
(double) perfTimeNow/M1,
(double) (buffer->Header()->start_time - perfTimeNow)/M1);
// Display frame if we're in B_OFFLINE mode or
// within +/- a half frame time of start time
if ( (mRunMode == B_OFFLINE) ||
((perfTimeNow > (buffer->Header()->start_time - halfPeriod)) &&
(perfTimeNow < (buffer->Header()->start_time + halfPeriod))) ) {
uint32 bpp = (mColorspace == B_RGB32 ? 4 : 2);
memcpy(m_Bitmap->Bits(), buffer->Data(), mRowBytes * mYSize * bpp);
buffer->Header()->start_time = system_time();
buffer->Recycle();
bigtime_t t1 = system_time();
// Update view
if (LockLooper()) {
DrawBitmap(m_Bitmap, Bounds());
UnlockLooper();
//.........这里部分代码省略.........