本文整理汇总了C++中BBuffer::Data方法的典型用法代码示例。如果您正苦于以下问题:C++ BBuffer::Data方法的具体用法?C++ BBuffer::Data怎么用?C++ BBuffer::Data使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类BBuffer
的用法示例。
在下文中一共展示了BBuffer::Data方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: BufferDuration
BBuffer*
SoundPlayNode::FillNextBuffer(bigtime_t eventTime)
{
	CALLED();

	const size_t byteCount = fOutput.format.u.raw_audio.buffer_size;

	// Ask the buffer group for a free buffer, waiting at most half a
	// buffer duration so a timed-out request cannot lock up the control
	// thread; in that case we simply skip this buffer.
	BBuffer* outBuffer = fBufferGroup->RequestBuffer(byteCount,
		BufferDuration() / 2);
	if (outBuffer == NULL) {
		ERROR("SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

	// Let the player render audio into the buffer, or emit silence when
	// it has nothing to play.
	if (fPlayer->HasData()) {
		fPlayer->PlayBuffer(outBuffer->Data(), byteCount,
			fOutput.format.u.raw_audio);
	} else
		memset(outBuffer->Data(), 0, byteCount);

	// Describe the payload in the buffer header.
	media_header* outHeader = outBuffer->Header();
	outHeader->type = B_MEDIA_RAW_AUDIO;
	outHeader->size_used = byteCount;
	outHeader->time_source = TimeSource()->ID();
	outHeader->start_time = eventTime;

	return outBuffer;
}
示例2: BufferDuration
BBuffer*
GameProducer::FillNextBuffer(bigtime_t event_time)
{
	// Request a free buffer; a timeout here means we skip this buffer
	// and move on rather than blocking the control thread.
	BBuffer* buffer = fBufferGroup->RequestBuffer(fBufferSize,
		BufferDuration());
	if (buffer == NULL)
		return NULL;

	// Describe the buffer: number of audio frames it holds.
	int64 frameCount = int64(fBufferSize / fFrameSize);

	// Start from silence, then let the game object render its audio,
	// continuing where the previous buffer left off.
	memset(buffer->Data(), 0, fBufferSize);
	fObject->Play(buffer->Data(), frameCount);

	// Fill in the buffer header.
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = fBufferSize;
	header->time_source = TimeSource()->ID();

	bigtime_t performanceTime;
	if (RunMode() == B_RECORDING) {
		// In B_RECORDING mode, stamp with the capture time. We're not
		// really a hardware capture node, but we simulate it by using the
		// (precalculated) time at which this buffer "should" have been
		// created.
		performanceTime = event_time;
	} else {
		// "Live" performance run modes: stamp the buffer with the time
		// at which it should be rendered to the output, not the capture
		// time. fStartTime caches the first buffer's performance time;
		// this buffer's stamp is an offset from it, based on the media
		// created so far. Recalculating every buffer like this avoids
		// accumulation of error.
		performanceTime = fStartTime + bigtime_t(double(fFramesSent)
			/ double(fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	header->start_time = performanceTime;

	return buffer;
}
示例3: BBufferGroup
// Reader loop for the FireWire DV card: pulls raw DV data from the card,
// slices it into BBuffers, and sends them down the encoded-video output.
// Runs until fTerminateThreads becomes true.
void
FireWireDVNode::card_reader_thread()
{
status_t err;
size_t rbufsize;
int rcount;
// Match our buffer group's geometry to the card's own buffering.
fCard->GetBufInfo(&rbufsize, &rcount);
delete fBufferGroupEncVideo;
fBufferGroupEncVideo = new BBufferGroup(rbufsize, rcount);
while (!fTerminateThreads) {
void *data, *end;
// Read() hands back a pointer to the card's data; the return value
// is its size in bytes (negative on error).
ssize_t sizeUsed = fCard->Read(&data);
if (sizeUsed < 0) {
TRACE("FireWireDVNode::%s: %s\n", __FUNCTION__,
strerror(sizeUsed));
continue;
}
end = (char*)data + sizeUsed;
// Extract() consumes from 'data'; loop until the whole chunk read
// from the card has been packaged into buffers.
while (data < end) {
BBuffer* buf = fBufferGroupEncVideo->RequestBuffer(rbufsize, 10000);
if (!buf) {
// Timed out waiting for a free buffer; retry the same data on
// the next iteration rather than dropping it.
TRACE("OutVideo: request buffer timout\n");
continue;
}
err = fCard->Extract(buf->Data(), &data, &sizeUsed);
if (err) {
// Extraction failed: return the buffer to the group and skip.
buf->Recycle();
printf("OutVideo Extract error %s\n", strerror(err));
continue;
}
// Describe the payload before sending it downstream.
media_header* hdr = buf->Header();
hdr->type = B_MEDIA_ENCODED_VIDEO;
hdr->size_used = sizeUsed;
hdr->time_source = TimeSource()->ID(); // set time source id
//what should the start_time be?
hdr->start_time = TimeSource()->PerformanceTimeFor(system_time());
// Serialize SendBuffer() with the node lock; on failure recycle the
// buffer ourselves so it is not lost to the group.
fLock.Lock();
if (SendBuffer(buf, fOutputEncVideo.source,
fOutputEncVideo.destination) != B_OK) {
TRACE("OutVideo: sending buffer failed\n");
buf->Recycle();
}
fLock.Unlock();
}
}
}
示例4: HandleBuffer
// how should we handle late buffers? drop them?
// notify the producer?
// Consume one queued audio buffer event: validate it, check its timing,
// and either write it to the ESD device or — when it is late in a live
// run mode — drop it and notify the producer.
// Returns B_BAD_VALUE for a missing buffer, B_MEDIA_BAD_DESTINATION for
// a buffer not addressed to our input, B_OK otherwise.
status_t ESDSinkNode::HandleBuffer(
const media_timed_event *event,
bigtime_t lateness,
bool realTimeEvent)
{
CALLED();
BBuffer * buffer = const_cast<BBuffer*>((BBuffer*)event->pointer);
if (buffer == 0) {
fprintf(stderr,"<- B_BAD_VALUE\n");
return B_BAD_VALUE;
}
// Make sure the buffer was really meant for our input connection.
if(fInput.destination.id != buffer->Header()->destination) {
fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n");
return B_MEDIA_BAD_DESTINATION;
}
media_header* hdr = buffer->Header();
bigtime_t now = TimeSource()->Now();
bigtime_t perf_time = hdr->start_time;
// the how_early calculate here doesn't include scheduling latency because
// we've already been scheduled to handle the buffer
bigtime_t how_early = perf_time - EventLatency() - now;
// if the buffer is late, we ignore it and report the fact to the producer
// who sent it to us
if ((RunMode() != B_OFFLINE) && // lateness doesn't matter in offline mode...
(RunMode() != B_RECORDING) && // ...or in recording mode
(how_early < 0LL))
{
//mLateBuffers++;
NotifyLateProducer(fInput.source, -how_early, perf_time);
fprintf(stderr," <- LATE BUFFER : %lli\n", how_early);
buffer->Recycle();
} else {
// NOTE(review): on this path the buffer is never Recycle()d; unless
// ownership passes elsewhere this looks like a buffer leak — confirm
// against the buffer group's accounting.
if (fDevice->CanSend())
fDevice->Write(buffer->Data(), buffer->SizeUsed());
}
return B_OK;
}
示例5: printf
/*!	Attach a processing buffer (one audio buffer from our buffer group)
	to every connected JACK output port.

	Returns B_ERROR if any port is unconnected or a usable buffer cannot
	be obtained, B_OK otherwise.
*/
status_t
ClientNode::_InitOutputPorts()
{
	//printf("JackClient::_InitOutputPorts()\n");
	JackPortList* outputPorts = fOwner->GetOutputPorts();
	for (int i = 0; i < outputPorts->CountItems(); i++) {
		JackPort* port = outputPorts->ItemAt(i);
		if (!port->IsConnected())
			return B_ERROR;

		BBuffer* buffer = fBufferGroup->RequestBuffer(
			fFormat.u.raw_audio.buffer_size);
		if (buffer == NULL || buffer->Data() == NULL) {
			// Bug fix: a non-NULL buffer without usable data was
			// previously leaked here — return it to the group before
			// bailing out, so the group does not slowly run dry.
			if (buffer != NULL)
				buffer->Recycle();
			printf("RequestBuffer failed\n");
			return B_ERROR;
		}
		port->SetProcessingBuffer(buffer);
	}
	return B_OK;
}
示例6: _
//.........这里部分代码省略.........
fManager->Unlock();
}
// next frame
fFrame++;
// Send buffers only, if playing, the node is running and the
// output has been enabled
} else if (playingDirection != 0 || forceSendingBuffer) {
ldebug("VideoProducer: produce frame\n");
BAutolock _(fLock);
// Fetch a buffer from the buffer group
BBuffer *buffer = fUsedBufferGroup->RequestBuffer(
fConnectedFormat.display.bytes_per_row
* fConnectedFormat.display.line_count, 0LL);
if (buffer) {
// Fill out the details about this buffer.
media_header *h = buffer->Header();
h->type = B_MEDIA_RAW_VIDEO;
h->time_source = TimeSource()->ID();
h->size_used = fConnectedFormat.display.bytes_per_row
* fConnectedFormat.display.line_count;
// For a buffer originating from a device, you might
// want to calculate this based on the
// PerformanceTimeFor the time your buffer arrived at
// the hardware (plus any applicable adjustments).
h->start_time = fPerformanceTimeBase + performanceTime;
h->file_pos = 0;
h->orig_size = 0;
h->data_offset = 0;
h->u.raw_video.field_gamma = 1.0;
h->u.raw_video.field_sequence = fFrame;
h->u.raw_video.field_number = 0;
h->u.raw_video.pulldown_number = 0;
h->u.raw_video.first_active_line = 1;
h->u.raw_video.line_count
= fConnectedFormat.display.line_count;
// Fill in a frame
media_format mf;
mf.type = B_MEDIA_RAW_VIDEO;
mf.u.raw_video = fConnectedFormat;
ldebug("VideoProducer: frame: %Ld, playlistFrame: %Ld\n", fFrame, playlistFrame);
bool forceOrWasCached = forceSendingBuffer;
// if (fManager->LockWithTimeout(5000) == B_OK) {
// we need to lock the manager, or our
// fSupplier might work on bad data
err = fSupplier->FillBuffer(playlistFrame,
buffer->Data(), &mf,
forceOrWasCached);
// fManager->Unlock();
// } else {
// err = B_ERROR;
// }
// clean the buffer if something went wrong
if (err != B_OK) {
memset(buffer->Data(), 0, h->size_used);
err = B_OK;
}
// Send the buffer on down to the consumer
if (!forceOrWasCached) {
if (SendBuffer(buffer, fOutput.source,
fOutput.destination) != B_OK) {
printf("_FrameGenerator: Error sending buffer\n");
// If there is a problem sending the buffer,
// or if we don't send the buffer because its
// contents are the same as the last one,
// return it to its buffer group.
buffer->Recycle();
// we tell the supplier to delete
// its caches if there was a problem sending
// the buffer
fSupplier->DeleteCaches();
}
} else
buffer->Recycle();
// Only if everything went fine we clear the flag
// that forces us to send a buffer even if not
// playing.
if (err == B_OK) {
forceSendingBuffer = false;
lastFrameSentAt = performanceTime;
}
}
else ldebug("no buffer!\n");
// next frame
fFrame++;
} else {
ldebug("VideoProducer: not playing\n");
// next frame
fFrame++;
}
break;
default:
ldebug("Couldn't acquire semaphore. Error: %s\n", strerror(err));
running = false;
break;
}
}
ldebug("VideoProducer: frame generator thread done.\n");
return B_OK;
}
示例7: TimeSource
void
TVideoPreviewView::DisplayThread()
{
FUNCTION("TVideoPreviewView::DisplayThread\n");
bigtime_t timeout = 5000;
bigtime_t realTimeNow = 0;
bigtime_t perfTimeNow = 0;
bigtime_t halfPeriod = (bigtime_t) (500000./29.97);
bool timeSourceRunning = false;
while (!mDisplayQuit) {
if (acquire_sem(mServiceLock) == B_NO_ERROR) {
timeSourceRunning = TimeSource()->IsRunning();
realTimeNow = BTimeSource::RealTime();
perfTimeNow = TimeSource()->Now();
release_sem(mServiceLock);
}
snooze(timeout);
if (timeSourceRunning) {
// if we received a Stop, deal with it
if (mStopping) {
PROGRESS("VidConsumer::DisplayThread - STOP\n");
if (perfTimeNow >= mStopTime) {
mRunning = false;
mStopping = false;
// deal with any pending Seek
if (mSeeking)
mSeeking = false;
//if (mConnected)
// SendDataStatus(B_DATA_NOT_AVAILABLE, mConnections[0], mStopTime);
continue;
}
}
// if we received a Seek, deal with it
if (mSeeking) {
PROGRESS("VidConsumer::DisplayThread - SEEK\n");
if (perfTimeNow >= mSeekTime) {
PROGRESS("VidConsumer::DisplayThread - DO SEEK\n");
mSeeking = false;
mDeltaTime = mMediaTime;
continue;
}
}
// if we received a Start, deal with it
if (mStarting) {
PROGRESS("BBt848Controllable::CaptureRun mStartTime = %.4f TimeNow = %.4f\n", (double)mStartTime/M1, (double)perfTimeNow/M1);
if (perfTimeNow >= mStartTime) {
mRunning = true;
mStarting = false;
mDeltaTime = mStartTime;
//if (mConnected)
// SendDataStatus(B_DATA_AVAILABLE, mConnections[0], mStartTime);
continue;
}
}
if (mRunning) {
// check for buffer available.
status_t err = acquire_sem_etc(mBufferAvailable, 1, B_TIMEOUT, halfPeriod * 2);
if (err == B_TIMED_OUT || !mConnected) {
ERROR("VidConsumer::DisplayThread - Error from acquire_sem_etc: 0x%lx\n", err);
continue;
}
BBuffer* buffer = mBufferQueue->PopFirstBuffer(0);
LOOP("Popped buffer %08x, Start time: %.4f, system time: %.4f diff: %.4f\n",
buffer,
(double) buffer->Header()->start_time/M1,
(double) perfTimeNow/M1,
(double) (buffer->Header()->start_time - perfTimeNow)/M1);
// Display frame if we're in B_OFFLINE mode or
// within +/- a half frame time of start time
if ( (mRunMode == B_OFFLINE) ||
((perfTimeNow > (buffer->Header()->start_time - halfPeriod)) &&
(perfTimeNow < (buffer->Header()->start_time + halfPeriod))) ) {
uint32 bpp = (mColorspace == B_RGB32 ? 4 : 2);
memcpy(m_Bitmap->Bits(), buffer->Data(), mRowBytes * mYSize * bpp);
buffer->Header()->start_time = system_time();
buffer->Recycle();
bigtime_t t1 = system_time();
// Update view
if (LockLooper()) {
DrawBitmap(m_Bitmap, Bounds());
UnlockLooper();
//.........这里部分代码省略.........
示例8: message
// Produce the next audio buffer: pull frames from the supplier (falling
// back to silence on failure), stamp the header, optionally dump frames
// to a media file, and compute per-channel peaks for the peak listener.
BBuffer*
AudioProducer::_FillNextBuffer(bigtime_t eventTime)
{
BBuffer* buffer = fBufferGroup->RequestBuffer(
fOutput.format.u.raw_audio.buffer_size, BufferDuration());
if (!buffer) {
ERROR("AudioProducer::_FillNextBuffer() - no buffer\n");
return NULL;
}
// Bytes per sample are encoded in the low bits of the raw-audio format
// constant, extracted via B_AUDIO_SIZE_MASK.
size_t sampleSize = fOutput.format.u.raw_audio.format
& media_raw_audio_format::B_AUDIO_SIZE_MASK;
size_t numSamples = fOutput.format.u.raw_audio.buffer_size / sampleSize;
// number of sample in the buffer
// fill in the buffer header
media_header* header = buffer->Header();
header->type = B_MEDIA_RAW_AUDIO;
header->time_source = TimeSource()->ID();
buffer->SetSizeUsed(fOutput.format.u.raw_audio.buffer_size);
// Performance time of this buffer's first frame, recomputed from the
// total frames sent so far to avoid accumulating rounding error.
bigtime_t performanceTime = bigtime_t(double(fFramesSent)
* 1000000.0 / double(fOutput.format.u.raw_audio.frame_rate));
// fill in data from audio supplier
int64 frameCount = numSamples / fOutput.format.u.raw_audio.channel_count;
bigtime_t startTime = performanceTime;
bigtime_t endTime = bigtime_t(double(fFramesSent + frameCount)
* 1000000.0 / fOutput.format.u.raw_audio.frame_rate);
// If the supplier is missing, unhealthy, or fails to deliver frames,
// emit silence instead of stale buffer contents.
if (!fSupplier || fSupplier->InitCheck() != B_OK
|| fSupplier->GetFrames(buffer->Data(), frameCount, startTime,
endTime) != B_OK) {
ERROR("AudioProducer::_FillNextBuffer() - supplier error -> silence\n");
memset(buffer->Data(), 0, buffer->SizeUsed());
}
// stamp buffer
if (RunMode() == B_RECORDING) {
// Recording mode: stamp with the (simulated) capture time.
header->start_time = eventTime;
} else {
// Live modes: stamp with the rendering time, an offset from the
// cached start time.
header->start_time = fStartTime + performanceTime;
}
#if DEBUG_TO_FILE
BMediaTrack* track;
if (BMediaFile* file = init_media_file(fOutput.format, &track)) {
track->WriteFrames(buffer->Data(), frameCount);
}
#endif // DEBUG_TO_FILE
if (fPeakListener
&& fOutput.format.u.raw_audio.format
== media_raw_audio_format::B_AUDIO_FLOAT) {
// TODO: extend the peak notifier for other sample formats
int32 channels = fOutput.format.u.raw_audio.channel_count;
float max[channels];
float min[channels];
for (int32 i = 0; i < channels; i++) {
max[i] = -1.0;
min[i] = 1.0;
}
// Scan the interleaved float samples for per-channel min/max.
float* sample = (float*)buffer->Data();
for (uint32 i = 0; i < frameCount; i++) {
for (int32 k = 0; k < channels; k++) {
if (*sample < min[k])
min[k] = *sample;
if (*sample > max[k])
max[k] = *sample;
sample++;
}
}
// One "max" entry per channel: the largest absolute sample value.
BMessage message(MSG_PEAK_NOTIFICATION);
for (int32 i = 0; i < channels; i++) {
float maxAbs = max_c(fabs(min[i]), fabs(max[i]));
message.AddFloat("max", maxAbs);
}
// Schedule delivery of the notification at the buffer's real
// (wall-clock) time.
bigtime_t realTime = TimeSource()->RealTimeFor(
fStartTime + performanceTime, 0);
MessageEvent* event = new (std::nothrow) MessageEvent(realTime,
fPeakListener, message);
if (event != NULL)
EventQueue::Default().AddEvent(event);
}
return buffer;
}
示例9: BufferDuration
BBuffer*
ToneProducer::FillNextBuffer(bigtime_t event_time)
{
	// Obtain a free buffer from our group; on timeout we skip this
	// buffer and go on to the next, so the control thread never blocks.
	BBuffer* buffer = mBufferGroup->RequestBuffer(
		mOutput.format.u.raw_audio.buffer_size, BufferDuration());
	if (buffer == NULL)
		return NULL;

	// 20sep99: multichannel support
	size_t frameCount = mOutput.format.u.raw_audio.buffer_size
		/ (sizeof(float) * mOutput.format.u.raw_audio.channel_count);
	bool isStereo = (mOutput.format.u.raw_audio.channel_count == 2);
	if (!isStereo) {
		ASSERT(mOutput.format.u.raw_audio.channel_count == 1);
	}
	// PRINT(("buffer: %ld, %ld frames, %s\n", mOutput.format.u.raw_audio.buffer_size, numFrames, stereo ? "stereo" : "mono"));

	// Render the selected waveform, continuing where the previous
	// buffer left off.
	float* samples = (float*)buffer->Data();
	switch (mWaveform) {
		case SINE_WAVE:
			FillSineBuffer(samples, frameCount, isStereo);
			break;
		case TRIANGLE_WAVE:
			FillTriangleBuffer(samples, frameCount, isStereo);
			break;
		case SAWTOOTH_WAVE:
			FillSawtoothBuffer(samples, frameCount, isStereo);
			break;
	}

	// Describe the payload in the buffer header.
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = mOutput.format.u.raw_audio.buffer_size;
	header->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// In B_RECORDING mode, we stamp with the capture time. We're not
		// really a hardware capture node, but we simulate it by using the
		// (precalculated) time at which this buffer "should" have been
		// created.
		stamp = event_time;
	} else {
		// "Live" performance run modes: stamp the buffer with the time
		// at which it should be rendered to the output, not the capture
		// time. mStartTime caches the first buffer's performance time;
		// this buffer's stamp is an offset from it based on the media
		// created so far. Recalculating every buffer like this avoids
		// accumulation of error.
		stamp = mStartTime + bigtime_t(double(mFramesSent)
			/ double(mOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	header->start_time = stamp;

	return buffer;
}
示例10: _
//.........这里部分代码省略.........
fFrame++;
} else if (playingDirection != 0 || forceSendingBuffer) {
// Send buffers only, if playing, the node is running and
// the output has been enabled
TRACE("_FrameGeneratorThread: produce frame\n");
BAutolock _(fLock);
// Fetch a buffer from the buffer group
fUsedBufferGroup->WaitForBuffers();
BBuffer* buffer = fUsedBufferGroup->RequestBuffer(
fConnectedFormat.display.bytes_per_row
* fConnectedFormat.display.line_count, 0LL);
if (buffer == NULL) {
// Wait until a buffer becomes available again
ERROR("_FrameGeneratorThread: no buffer!\n");
break;
}
// Fill out the details about this buffer.
media_header* h = buffer->Header();
h->type = B_MEDIA_RAW_VIDEO;
h->time_source = TimeSource()->ID();
h->size_used = fConnectedFormat.display.bytes_per_row
* fConnectedFormat.display.line_count;
// For a buffer originating from a device, you might
// want to calculate this based on the
// PerformanceTimeFor the time your buffer arrived at
// the hardware (plus any applicable adjustments).
h->start_time = fPerformanceTimeBase + performanceTime;
h->file_pos = 0;
h->orig_size = 0;
h->data_offset = 0;
h->u.raw_video.field_gamma = 1.0;
h->u.raw_video.field_sequence = fFrame;
h->u.raw_video.field_number = 0;
h->u.raw_video.pulldown_number = 0;
h->u.raw_video.first_active_line = 1;
h->u.raw_video.line_count
= fConnectedFormat.display.line_count;
// Fill in a frame
TRACE("_FrameGeneratorThread: frame: %Ld, "
"playlistFrame: %Ld\n", fFrame, playlistFrame);
bool wasCached = false;
err = fSupplier->FillBuffer(playlistFrame,
buffer->Data(), fConnectedFormat, forceSendingBuffer,
wasCached);
if (err == B_TIMED_OUT) {
// Don't send the buffer if there was insufficient
// time for rendering, this will leave the last
// valid frame on screen until we catch up, instead
// of going black.
wasCached = true;
err = B_OK;
}
// clean the buffer if something went wrong
if (err != B_OK) {
// TODO: should use "back value" according
// to color space!
memset(buffer->Data(), 0, h->size_used);
err = B_OK;
}
// Send the buffer on down to the consumer
if (wasCached || (err = SendBuffer(buffer, fOutput.source,
fOutput.destination) != B_OK)) {
// If there is a problem sending the buffer,
// or if we don't send the buffer because its
// contents are the same as the last one,
// return it to its buffer group.
buffer->Recycle();
// we tell the supplier to delete
// its caches if there was a problem sending
// the buffer
if (err != B_OK) {
ERROR("_FrameGeneratorThread: Error "
"sending buffer\n");
fSupplier->DeleteCaches();
}
}
// Only if everything went fine we clear the flag
// that forces us to send a buffer even if not
// playing.
if (err == B_OK)
forceSendingBuffer = false;
// next frame
fFrame++;
droppedFrames = 0;
} else {
TRACE("_FrameGeneratorThread: not playing\n");
// next frame
fFrame++;
}
break;
default:
TRACE("_FrameGeneratorThread: Couldn't acquire semaphore. "
"Error: %s\n", strerror(err));
running = false;
break;
}
}
TRACE("_FrameGeneratorThread: frame generator thread done.\n");
return B_OK;
}
示例11: main
int main()
{
// app_server connection (no need to run it)
BApplication app("application/x-vnd-test");
BBufferGroup * group;
status_t s;
int32 count;
BBuffer *buffer;
/*
printf("using default constructor:\n");
group = new BBufferGroup();
s = group->InitCheck();
printf("InitCheck: status = %ld\n",s);
s = group->CountBuffers(&count);
printf("CountBuffers: count = %ld, status = %ld\n",count,s);
delete group;
*/
printf("\n");
printf("using size = 1234 constructor:\n");
group = new BBufferGroup(1234);
s = group->InitCheck();
printf("InitCheck: status = %ld\n",s);
s = group->CountBuffers(&count);
printf("CountBuffers: count = %ld, status = %ld\n",count,s);
s = group->GetBufferList(1,&buffer);
printf("GetBufferList: status = %ld\n",s);
printf("Buffer->Data: = %08x\n",(int)buffer->Data());
printf("Buffer->ID: = %d\n",(int)buffer->ID());
printf("Buffer->Size: = %ld\n",buffer->Size());
printf("Buffer->SizeAvailable: = %ld\n",buffer->SizeAvailable());
printf("Buffer->SizeUsed: = %ld\n",buffer->SizeUsed());
printf("\n");
media_buffer_id id = buffer->ID();
BBufferGroup * group2 = new BBufferGroup(1,&id);
printf("creating second group with a buffer from first group:\n");
s = group2->InitCheck();
printf("InitCheck: status = %ld\n",s);
s = group2->CountBuffers(&count);
printf("CountBuffers: count = %ld, status = %ld\n",count,s);
buffer = 0;
s = group2->GetBufferList(1,&buffer);
printf("GetBufferList: status = %ld\n",s);
printf("Buffer->Data: = %08x\n",(int)buffer->Data());
printf("Buffer->ID: = %d\n",(int)buffer->ID());
printf("Buffer->Size: = %ld\n",buffer->Size());
printf("Buffer->SizeAvailable: = %ld\n",buffer->SizeAvailable());
printf("Buffer->SizeUsed: = %ld\n",buffer->SizeUsed());
delete group;
delete group2;
printf("\n");
/*
printf("creating a BSmallBuffer:\n");
BSmallBuffer * sb = new BSmallBuffer;
printf("sb->Data: = %08x\n",(int)sb->Data());
printf("sb->ID: = %d\n",(int)sb->ID());
printf("sb->Size: = %ld\n",sb->Size());
printf("sb->SizeAvailable: = %ld\n",sb->SizeAvailable());
printf("sb->SizeUsed: = %ld\n",sb->SizeUsed());
printf("sb->SmallBufferSizeLimit: = %ld\n",sb->SmallBufferSizeLimit());
delete sb;
*/
return 0;
}
示例12: max
void
MixerCore::_MixThread()
{
// The broken BeOS R5 multiaudio node starts with time 0,
// then publishes negative times for about 50ms, publishes 0
// again until it finally reaches time values > 0
if (!LockFromMixThread())
return;
bigtime_t start = fTimeSource->Now();
Unlock();
while (start <= 0) {
TRACE("MixerCore: delaying _MixThread start, timesource is at %Ld\n",
start);
snooze(5000);
if (!LockFromMixThread())
return;
start = fTimeSource->Now();
Unlock();
}
if (!LockFromMixThread())
return;
bigtime_t latency = max((bigtime_t)3600, bigtime_t(0.4 * buffer_duration(
fOutput->MediaOutput().format.u.raw_audio)));
// TODO: when the format changes while running, everything is wrong!
bigtime_t bufferRequestTimeout = buffer_duration(
fOutput->MediaOutput().format.u.raw_audio) / 2;
TRACE("MixerCore: starting _MixThread at %Ld with latency %Ld and "
"downstream latency %Ld, bufferRequestTimeout %Ld\n", start, latency,
fDownstreamLatency, bufferRequestTimeout);
// We must read from the input buffer at a position (pos) that is always
// a multiple of fMixBufferFrameCount.
int64 temp = frames_for_duration(fMixBufferFrameRate, start);
int64 frameBase = ((temp / fMixBufferFrameCount) + 1)
* fMixBufferFrameCount;
bigtime_t timeBase = duration_for_frames(fMixBufferFrameRate, frameBase);
Unlock();
TRACE("MixerCore: starting _MixThread, start %Ld, timeBase %Ld, "
"frameBase %Ld\n", start, timeBase, frameBase);
ASSERT(fMixBufferFrameCount > 0);
#if DEBUG
uint64 bufferIndex = 0;
#endif
typedef RtList<chan_info> chan_info_list;
chan_info_list inputChanInfos[MAX_CHANNEL_TYPES];
BStackOrHeapArray<chan_info_list, 16> mixChanInfos(fMixBufferChannelCount);
// TODO: this does not support changing output channel count
bigtime_t eventTime = timeBase;
int64 framePos = 0;
for (;;) {
if (!LockFromMixThread())
return;
bigtime_t waitUntil = fTimeSource->RealTimeFor(eventTime, 0)
- latency - fDownstreamLatency;
Unlock();
status_t rv = acquire_sem_etc(fMixThreadWaitSem, 1, B_ABSOLUTE_TIMEOUT,
waitUntil);
if (rv == B_INTERRUPTED)
continue;
if (rv != B_TIMED_OUT && rv < B_OK)
return;
if (!LockWithTimeout(10000)) {
ERROR("MixerCore: LockWithTimeout failed\n");
continue;
}
// no inputs or output muted, skip further processing and just send an
// empty buffer
if (fInputs->IsEmpty() || fOutput->IsMuted()) {
int size = fOutput->MediaOutput().format.u.raw_audio.buffer_size;
BBuffer* buffer = fBufferGroup->RequestBuffer(size,
bufferRequestTimeout);
if (buffer != NULL) {
memset(buffer->Data(), 0, size);
// fill in the buffer header
media_header* hdr = buffer->Header();
hdr->type = B_MEDIA_RAW_AUDIO;
hdr->size_used = size;
hdr->time_source = fTimeSource->ID();
hdr->start_time = eventTime;
if (fNode->SendBuffer(buffer, fOutput) != B_OK) {
#if DEBUG
ERROR("MixerCore: SendBuffer failed for buffer %Ld\n",
bufferIndex);
#else
ERROR("MixerCore: SendBuffer failed\n");
#endif
buffer->Recycle();
}
} else {
#if DEBUG
//.........这里部分代码省略.........
示例13: _
/* The following functions form the thread that generates frames. You should
* replace this with the code that interfaces to your hardware. */
int32
VideoProducer::FrameGenerator()
{
bigtime_t wait_until = system_time();
while (1) {
PRINTF(1, ("FrameGenerator: acquire_sem_etc() until %Ldµs (in %Ldµs)\n", wait_until, wait_until - system_time()));
status_t err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT,
wait_until);
/* The only acceptable responses are B_OK and B_TIMED_OUT. Everything
* else means the thread should quit. Deleting the semaphore, as in
* VideoProducer::HandleStop(), will trigger this behavior. */
if ((err != B_OK) && (err != B_TIMED_OUT))
break;
fFrame++;
/* Recalculate the time until the thread should wake up to begin
* processing the next frame. Subtract fProcessingLatency so that
* the frame is sent in time. */
wait_until = TimeSource()->RealTimeFor(fPerformanceTimeBase, 0) +
(bigtime_t)
((fFrame - fFrameBase) *
(1000000 / fConnectedFormat.field_rate)) -
fProcessingLatency;
PRINT(("PS: %Ld\n", fProcessingLatency));
/* Drop frame if it's at least a frame late */
if (wait_until < system_time())
continue;
PRINTF(1, ("FrameGenerator: wait until %Ld, %ctimed out, %crunning, %cenabled.\n",
wait_until,
(err == B_OK)?'!':' ',
(fRunning)?' ':'!',
(fEnabled)?' ':'!'));
/* If the semaphore was acquired successfully, it means something
* changed the timing information (see VideoProducer::Connect()) and
* so the thread should go back to sleep until the newly-calculated
* wait_until time. */
if (err == B_OK)
continue;
/* Send buffers only if the node is running and the output has been
* enabled */
if (!fRunning || !fEnabled)
continue;
BAutolock _(fLock);
/* Fetch a buffer from the buffer group */
BBuffer *buffer = fBufferGroup->RequestBuffer(
4 * fConnectedFormat.display.line_width *
fConnectedFormat.display.line_count, 0LL);
if (!buffer)
continue;
/* Fill out the details about this buffer. */
media_header *h = buffer->Header();
h->type = B_MEDIA_RAW_VIDEO;
h->time_source = TimeSource()->ID();
h->size_used = 4 * fConnectedFormat.display.line_width *
fConnectedFormat.display.line_count;
/* For a buffer originating from a device, you might want to calculate
* this based on the PerformanceTimeFor the time your buffer arrived at
* the hardware (plus any applicable adjustments). */
/*
h->start_time = fPerformanceTimeBase +
(bigtime_t)
((fFrame - fFrameBase) *
(1000000 / fConnectedFormat.field_rate));
*/
h->file_pos = 0;
h->orig_size = 0;
h->data_offset = 0;
h->u.raw_video.field_gamma = 1.0;
h->u.raw_video.field_sequence = fFrame;
h->u.raw_video.field_number = 0;
h->u.raw_video.pulldown_number = 0;
h->u.raw_video.first_active_line = 1;
h->u.raw_video.line_count = fConnectedFormat.display.line_count;
// This is where we fill the video buffer.
#if 0
uint32 *p = (uint32 *)buffer->Data();
/* Fill in a pattern */
for (uint32 y=0;y<fConnectedFormat.display.line_count;y++)
for (uint32 x=0;x<fConnectedFormat.display.line_width;x++)
*(p++) = ((((x+y)^0^x)+fFrame) & 0xff) * (0x01010101 & fColor);
#endif
//NO! must be called without lock!
//BAutolock lock(fCamDevice->Locker());
bigtime_t now = system_time();
//.........这里部分代码省略.........
示例14: _
/* The following functions form the thread that generates frames. You should
* replace this with the code that interfaces to your hardware. */
int32
FinePixProducer::FrameGenerator()
{
bigtime_t wait_until = system_time();
while (1) {
status_t err = acquire_sem_etc(fFrameSync, 1, B_ABSOLUTE_TIMEOUT,
wait_until);
/* The only acceptable responses are B_OK and B_TIMED_OUT. Everything
* else means the thread should quit. Deleting the semaphore, as in
* FinePixProducer::HandleStop(), will trigger this behavior. */
if ((err != B_OK) && (err != B_TIMED_OUT))
break;
fFrame++;
/* Recalculate the time until the thread should wake up to begin
* processing the next frame. Subtract fProcessingLatency so that
* the frame is sent in time. */
wait_until = TimeSource()->RealTimeFor(fPerformanceTimeBase, 0) +
(bigtime_t)
((fFrame - fFrameBase) *
(1000000 / fConnectedFormat.field_rate)) -
fProcessingLatency;
/* Drop frame if it's at least a frame late */
if (wait_until < system_time())
continue;
/* If the semaphore was acquired successfully, it means something
* changed the timing information (see FinePixProducer::Connect()) and
* so the thread should go back to sleep until the newly-calculated
* wait_until time. */
if (err == B_OK)
continue;
/* Send buffers only if the node is running and the output has been
* enabled */
if (!fRunning || !fEnabled)
continue;
BAutolock _(fLock);
// Get the frame from the camera
fCam->GetPic(fDeltaBuffer, frame_size);
/* Fetch a buffer from the buffer group */
BBuffer *buffer = fBufferGroup->RequestBuffer(
4 * fConnectedFormat.display.line_width *
fConnectedFormat.display.line_count, 0LL);
if (!buffer)
continue;
/* Fill out the details about this buffer. */
media_header *h = buffer->Header();
h->type = B_MEDIA_RAW_VIDEO;
h->time_source = TimeSource()->ID();
h->size_used = 4 * fConnectedFormat.display.line_width *
fConnectedFormat.display.line_count;
/* For a buffer originating from a device, you might want to calculate
* this based on the PerformanceTimeFor the time your buffer arrived at
* the hardware (plus any applicable adjustments).
h->start_time = fPerformanceTimeBase +
(bigtime_t)
((fFrame - fFrameBase) *
(1000000 / fConnectedFormat.field_rate));*/
h->start_time = TimeSource()->Now();
h->file_pos = 0;
h->orig_size = 0;
h->data_offset = 0;
h->u.raw_video.field_gamma = 1.0;
h->u.raw_video.field_sequence = fFrame;
h->u.raw_video.field_number = 0;
h->u.raw_video.pulldown_number = 0;
h->u.raw_video.first_active_line = 1;
h->u.raw_video.line_count = fConnectedFormat.display.line_count;
// Frame data pointers
uint8 *tmp24 = (uint8*)tempInBuffer;
uint8 *dst = (uint8*)buffer->Data();
// Convert from jpeg to bitmap
if (jpeg_check_size(fDeltaBuffer,
FPIX_RGB24_WIDTH, FPIX_RGB24_HEIGHT))
{
int n = jpeg_decode(fDeltaBuffer, tmp24,
FPIX_RGB24_WIDTH, FPIX_RGB24_HEIGHT, 24, //32 not working
&decdata);
if (n)
{
PRINTF(-1, ("ooeps decode jpg result : %d", n));
}
} else
{
PRINTF(-1, ("ooeps check_size failed"));
}
//.........这里部分代码省略.........