This article collects typical usage examples of the C++ method sp::findBuffer. If you are wondering what sp::findBuffer does, how it is used, and what calling it looks like in practice, the curated code examples below should help. (In all of these examples, findBuffer is called through an sp<AMessage> strong pointer, so the method being demonstrated is android::AMessage::findBuffer from the stagefright foundation library.) You can also explore further usage examples of the containing class sp.
The following shows 15 code examples of sp::findBuffer, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
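Before diving into the examples, here is a minimal sketch of the pattern they all share: the sender attaches an ABuffer to an AMessage under a string key with setBuffer(), and the receiver retrieves it with findBuffer(), which returns false when no buffer is stored under that key. This is an illustration only, not taken from any example below; the function name demoFindBuffer, the key "buffer", and the 128-byte size are made up for the demo, and it assumes the AOSP stagefright foundation headers are available.

#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

static void demoFindBuffer() {
    sp<AMessage> msg = new AMessage;

    sp<ABuffer> payload = new ABuffer(128);   // 128-byte demo payload
    msg->setBuffer("buffer", payload);        // store under the key "buffer"

    sp<ABuffer> out;
    if (msg->findBuffer("buffer", &out)) {    // returns false if the key is absent
        // out->data() and out->size() now describe the attached payload.
    }
}

Most of the examples below wrap the lookup in CHECK(msg->findBuffer(...)) instead of an if, because the sender is known to have set the key and a missing buffer would be a programming error.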
Example 1: onMessageReceived
void TunnelRenderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatQueueBuffer:
        {
            sp<ABuffer> buffer;
            CHECK(msg->findBuffer("buffer", &buffer));

            queueBuffer(buffer);

            if (mStreamSource == NULL) {
                if (mTotalBytesQueued > 0ll) {
                    initPlayer();
                } else {
                    ALOGI("Have %lld bytes queued...", mTotalBytesQueued);
                }
            } else {
                mStreamSource->doSomeWork();
            }
            break;
        }

        default:
            TRESPASS();
    }
}
Example 2: onDecoderNotify
void DirectRenderer::onDecoderNotify(const sp<AMessage> &msg) {
    size_t trackIndex;
    CHECK(msg->findSize("trackIndex", &trackIndex));

    int32_t what;
    CHECK(msg->findInt32("what", &what));

    switch (what) {
        case DecoderContext::kWhatOutputBufferReady:
        {
            size_t index;
            CHECK(msg->findSize("index", &index));

            int64_t timeUs;
            CHECK(msg->findInt64("timeUs", &timeUs));

            sp<ABuffer> buffer;
            CHECK(msg->findBuffer("buffer", &buffer));

            queueOutputBuffer(trackIndex, index, timeUs, buffer);
            break;
        }

        default:
            TRESPASS();
    }
}
Example 3: supportsSeamlessAudioFormatChange
bool NuPlayer::Decoder::supportsSeamlessAudioFormatChange(const sp<AMessage> &targetFormat) const {
    if (targetFormat == NULL) {
        return true;
    }

    AString mime;
    if (!targetFormat->findString("mime", &mime)) {
        return false;
    }

    if (!strcasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_AAC)) {
        // field-by-field comparison
        const char *keys[] = { "channel-count", "sample-rate", "is-adts" };
        for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
            int32_t oldVal, newVal;
            if (!mFormat->findInt32(keys[i], &oldVal)
                    || !targetFormat->findInt32(keys[i], &newVal)
                    || oldVal != newVal) {
                return false;
            }
        }

        sp<ABuffer> oldBuf, newBuf;
        if (mFormat->findBuffer("csd-0", &oldBuf)
                && targetFormat->findBuffer("csd-0", &newBuf)) {
            if (oldBuf->size() != newBuf->size()) {
                return false;
            }
            return !memcmp(oldBuf->data(), newBuf->data(), oldBuf->size());
        }
    }
    return false;
}
Example 4: onSessionNotify_l
void NuPlayer::HTTPLiveSource::onSessionNotify_l(const sp<AMessage> &msg) {
    int32_t what;
    CHECK(msg->findInt32("what", &what));

    if (what == LiveSession::kWhatPicture) {
        sp<ABuffer> metabuffer;
        CHECK(msg->findBuffer("buffer", &metabuffer));

        AString mimeType;
        sp<ABuffer> buffer;
        if (metabuffer->meta()->findString("mime", &mimeType)
                && metabuffer->meta()->findBuffer("pictureBuffer", &buffer)) {
            if (mMetaData == NULL) {
                mMetaData = new MetaData;
            }
            mMetaData->setCString(kKeyAlbumArtMIME, mimeType.c_str());
            mMetaData->setData(kKeyAlbumArt, MetaData::TYPE_NONE, buffer->data(), buffer->size());
            ALOGI("kKeyAlbumArt set Data :%s, datasize:%zu", mimeType.c_str(), buffer->size());

            sp<AMessage> notify = dupNotify();
            notify->setInt32("what", NuPlayer::Source::kWhatPicture);
            notify->post();
        }
    } else if (what == LiveSession::kWhatBufferingStart) {
        sp<AMessage> notify = dupNotify();
        notify->setInt32("what", kWhatBufferingStart);
        notify->post();
    } else if (what == LiveSession::kWhatBufferingEnd) {
        sp<AMessage> notify = dupNotify();
        notify->setInt32("what", kWhatBufferingEnd);
        notify->post();
    }
}
Example 5: configure
void NuPlayer::Decoder::configure(const sp<AMessage> &format) {
    CHECK(mCodec == NULL);

    AString mime;
    CHECK(format->findString("mime", &mime));

    sp<AMessage> notifyMsg =
        new AMessage(kWhatCodecNotify, id());

    mCSDIndex = 0;
    for (size_t i = 0;; ++i) {
        sp<ABuffer> csd;
        if (!format->findBuffer(StringPrintf("csd-%zu", i).c_str(), &csd)) {
            break;
        }
        mCSD.push(csd);
    }

#ifdef QCOM_HARDWARE
    sp<ABuffer> extendedCSD = ExtendedCodec::getRawCodecSpecificData(format);
    if (extendedCSD != NULL) {
        ALOGV("pushing extended CSD of size %zu", extendedCSD->size());
        mCSD.push(extendedCSD);
    }

    sp<ABuffer> aacCSD = ExtendedCodec::getAacCodecSpecificData(format);
    if (aacCSD != NULL) {
        ALOGV("pushing AAC CSD of size %zu", aacCSD->size());
        mCSD.push(aacCSD);
    }
#endif

    if (mNativeWindow != NULL) {
        format->setObject("native-window", mNativeWindow);
    }

    // Current video decoders do not return from OMX_FillThisBuffer
    // quickly, violating the OpenMAX specs; until that is remedied
    // we need to invest in an extra looper to free the main event
    // queue.
    bool needDedicatedLooper = !strncasecmp(mime.c_str(), "video/", 6);

    mFormat = format;
    mCodec = new ACodec;

    if (needDedicatedLooper && mCodecLooper == NULL) {
        mCodecLooper = new ALooper;
        mCodecLooper->setName("NuPlayerDecoder");
        mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
    }

    (needDedicatedLooper ? mCodecLooper : looper())->registerHandler(mCodec);

    mCodec->setNotificationMessage(notifyMsg);
    mCodec->initiateSetup(format);
}
Example 6: getAacCodecSpecificData
sp<ABuffer> ExtendedCodec::getAacCodecSpecificData(
        const sp<AMessage> &msg) {
    sp<ABuffer> buffer;

    if (msg->findBuffer(getMsgKey(kKeyAacCodecSpecificData), &buffer)) {
        ALOGV("ACodec found kKeyAacCodecSpecificData of size %zu\n", buffer->size());
        return buffer;
    }

    return NULL;
}
Example 7: onQueueAccessUnit
void DirectRenderer::onQueueAccessUnit(const sp<AMessage> &msg) {
    size_t trackIndex;
    CHECK(msg->findSize("trackIndex", &trackIndex));

    sp<ABuffer> accessUnit;
    CHECK(msg->findBuffer("accessUnit", &accessUnit));

    CHECK_LT(trackIndex, 2u);
    CHECK(mDecoderContext[trackIndex] != NULL);

    mDecoderContext[trackIndex]->queueInputBuffer(accessUnit);
}
Example 8: getHDRStaticInfoFromFormat
// static
bool ColorUtils::getHDRStaticInfoFromFormat(const sp<AMessage> &format, HDRStaticInfo *info) {
    sp<ABuffer> buf;
    if (!format->findBuffer("hdr-static-info", &buf)) {
        return false;
    }

    // TODO: Make this more flexible when adding more members to HDRStaticInfo
    if (buf->size() != 25 /* static Metadata Type 1 size */) {
        ALOGW("Ignore invalid HDRStaticInfo with size: %zu", buf->size());
        return false;
    }

    const uint8_t *data = buf->data();
    if (*data != HDRStaticInfo::kType1) {
        ALOGW("Unsupported static Metadata Type %u", *data);
        return false;
    }

    info->mID = HDRStaticInfo::kType1;
    info->sType1.mR.x = U16LE_AT(&data[1]);
    info->sType1.mR.y = U16LE_AT(&data[3]);
    info->sType1.mG.x = U16LE_AT(&data[5]);
    info->sType1.mG.y = U16LE_AT(&data[7]);
    info->sType1.mB.x = U16LE_AT(&data[9]);
    info->sType1.mB.y = U16LE_AT(&data[11]);
    info->sType1.mW.x = U16LE_AT(&data[13]);
    info->sType1.mW.y = U16LE_AT(&data[15]);
    info->sType1.mMaxDisplayLuminance = U16LE_AT(&data[17]);
    info->sType1.mMinDisplayLuminance = U16LE_AT(&data[19]);
    info->sType1.mMaxContentLightLevel = U16LE_AT(&data[21]);
    info->sType1.mMaxFrameAverageLightLevel = U16LE_AT(&data[23]);

    ALOGV("Got HDRStaticInfo from config (R: %u %u, G: %u %u, B: %u, %u, W: %u, %u, "
          "MaxDispL: %u, MinDispL: %u, MaxContentL: %u, MaxFrameAvgL: %u)",
          info->sType1.mR.x, info->sType1.mR.y, info->sType1.mG.x, info->sType1.mG.y,
          info->sType1.mB.x, info->sType1.mB.y, info->sType1.mW.x, info->sType1.mW.y,
          info->sType1.mMaxDisplayLuminance, info->sType1.mMinDisplayLuminance,
          info->sType1.mMaxContentLightLevel, info->sType1.mMaxFrameAverageLightLevel);

    return true;
}
Example 9: onFillThisBuffer
void DashPlayer::Decoder::onFillThisBuffer(const sp<AMessage> &msg) {
    sp<AMessage> reply;
    CHECK(msg->findMessage("reply", &reply));

#if 0
    sp<ABuffer> outBuffer;
    CHECK(msg->findBuffer("buffer", &outBuffer));
#else
    sp<ABuffer> outBuffer;
#endif

    if (mCSDIndex < mCSD.size()) {
        outBuffer = mCSD.editItemAt(mCSDIndex++);
        outBuffer->meta()->setInt64("timeUs", 0);

        reply->setBuffer("buffer", outBuffer);
        reply->post();
        return;
    }

    sp<AMessage> notify = mNotify->dup();
    notify->setMessage("codec-request", msg);
    notify->post();
}
Example 10: onInputBufferFilled
void MediaFilter::onInputBufferFilled(const sp<AMessage> &msg) {
    IOMX::buffer_id bufferID;
    CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
    BufferInfo *info = findBufferByID(kPortIndexInput, bufferID);

    if (mState != STARTED) {
        // we're not running, so we'll just keep that buffer...
        info->mStatus = BufferInfo::OWNED_BY_US;
        return;
    }

    if (info->mGeneration != mGeneration) {
        ALOGV("Caught a stale input buffer [ID %d]", bufferID);
        // buffer is stale (taken before a flush/shutdown) - repost it
        CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_US);
        postFillThisBuffer(info);
        return;
    }

    CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_UPSTREAM);
    info->mStatus = BufferInfo::OWNED_BY_US;

    sp<ABuffer> buffer;
    int32_t err = OK;
    bool eos = false;

    if (!msg->findBuffer("buffer", &buffer)) {
        // these are unfilled buffers returned by client
        CHECK(msg->findInt32("err", &err));

        if (err == OK) {
            // buffers with no errors are returned on MediaCodec.flush
            ALOGV("saw unfilled buffer (MediaCodec.flush)");
            postFillThisBuffer(info);
            return;
        } else {
            ALOGV("saw error %d instead of an input buffer", err);
            eos = true;
        }

        buffer.clear();
    }

    int32_t isCSD;
    if (buffer != NULL && buffer->meta()->findInt32("csd", &isCSD)
            && isCSD != 0) {
        // ignore codec-specific data buffers
        ALOGW("MediaFilter received a codec-specific data buffer");
        postFillThisBuffer(info);
        return;
    }

    int32_t tmp;
    if (buffer != NULL && buffer->meta()->findInt32("eos", &tmp) && tmp) {
        eos = true;
        err = ERROR_END_OF_STREAM;
    }

    mAvailableInputBuffers.push_back(info);
    processBuffers();

    if (eos) {
        mPortEOS[kPortIndexInput] = true;
        mInputEOSResult = err;
    }

    ALOGV("Handled kWhatInputBufferFilled. [ID %u]", bufferID);
}
Example 11: ConvertMessageToMap
status_t ConvertMessageToMap(
        JNIEnv *env, const sp<AMessage> &msg, jobject *map) {
    ScopedLocalRef<jclass> hashMapClazz(
            env, env->FindClass("java/util/HashMap"));
    if (hashMapClazz.get() == NULL) {
        return -EINVAL;
    }

    jmethodID hashMapConstructID =
        env->GetMethodID(hashMapClazz.get(), "<init>", "()V");
    if (hashMapConstructID == NULL) {
        return -EINVAL;
    }

    jmethodID hashMapPutID =
        env->GetMethodID(
                hashMapClazz.get(),
                "put",
                "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
    if (hashMapPutID == NULL) {
        return -EINVAL;
    }

    jobject hashMap = env->NewObject(hashMapClazz.get(), hashMapConstructID);

    for (size_t i = 0; i < msg->countEntries(); ++i) {
        AMessage::Type valueType;
        const char *key = msg->getEntryNameAt(i, &valueType);

        jobject valueObj = NULL;

        switch (valueType) {
            case AMessage::kTypeInt32:
            {
                int32_t val;
                CHECK(msg->findInt32(key, &val));
                valueObj = makeIntegerObject(env, val);
                break;
            }

            case AMessage::kTypeInt64:
            {
                int64_t val;
                CHECK(msg->findInt64(key, &val));
                valueObj = makeLongObject(env, val);
                break;
            }

            case AMessage::kTypeFloat:
            {
                float val;
                CHECK(msg->findFloat(key, &val));
                valueObj = makeFloatObject(env, val);
                break;
            }

            case AMessage::kTypeString:
            {
                AString val;
                CHECK(msg->findString(key, &val));
                valueObj = env->NewStringUTF(val.c_str());
                break;
            }

            case AMessage::kTypeBuffer:
            {
                sp<ABuffer> buffer;
                CHECK(msg->findBuffer(key, &buffer));
                valueObj = makeByteBufferObject(
                        env, buffer->data(), buffer->size());
                break;
            }

            case AMessage::kTypeRect:
            {
                int32_t left, top, right, bottom;
                CHECK(msg->findRect(key, &left, &top, &right, &bottom));

                SetMapInt32(
                        env,
                        hashMap,
                        hashMapPutID,
                        StringPrintf("%s-left", key).c_str(),
                        left);

                SetMapInt32(
                        env,
                        hashMap,
                        hashMapPutID,
                        StringPrintf("%s-top", key).c_str(),
                        top);
//......... (part of the code omitted here) .........
Example 12: onQueueBuffer
void DashPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (dropBufferWhileFlushing(audio, msg)) {
        return;
    }

    sp<ABuffer> buffer;
    CHECK(msg->findBuffer("buffer", &buffer));

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;

    if (audio) {
        mAudioQueue.push_back(entry);
        postDrainAudioQueue();
    } else {
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000ll) {
        // Audio data starts more than 0.1 secs before video.
        // Drop some audio.
        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    syncQueuesDone();
}
Example 13: onMessageReceived
//......... (part of the code omitted here) .........
                        mChosenVideoResolutionType,
                        mChosenVideoResolutionIndex,
                        &width,
                        &height,
                        NULL /* framesPerSecond */,
                        NULL /* interlaced */));

                mClient->onDisplayConnected(
                        mClientInfo.mPlaybackSession->getSurfaceTexture(),
                        width,
                        height,
                        mUsingHDCP
                            ? IRemoteDisplayClient::kDisplayFlagSecure
                            : 0,
                        playbackSessionID);
            }
        }

        finishPlay();

        if (mState == ABOUT_TO_PLAY) {
            mState = PLAYING;
        }
    } else if (what == PlaybackSession::kWhatSessionDestroyed) {
        disconnectClient2();
    } else {
        CHECK_EQ(what, PlaybackSession::kWhatBinaryData);

        int32_t channel;
        CHECK(msg->findInt32("channel", &channel));

        sp<ABuffer> data;
        CHECK(msg->findBuffer("data", &data));

        CHECK_LE(channel, 0xff);
        CHECK_LE(data->size(), 0xffffu);

        int32_t sessionID;
        CHECK(msg->findInt32("sessionID", &sessionID));

        char header[4];
        header[0] = '$';
        header[1] = channel;
        header[2] = data->size() >> 8;
        header[3] = data->size() & 0xff;

        mNetSession->sendRequest(sessionID, header, sizeof(header));
        mNetSession->sendRequest(sessionID, data->data(), data->size());
    }
    break;
}

case kWhatKeepAlive:
{
    int32_t sessionID;
    CHECK(msg->findInt32("sessionID", &sessionID));

    if (mClientSessionID != sessionID) {
        // Obsolete event, client is already gone.
        break;
    }
//......... (part of the code omitted here) .........
Example 14: onQueueBuffer
void DashPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (dropBufferWhileFlushing(audio, msg)) {
        return;
    }

    sp<ABuffer> buffer;
    CHECK(msg->findBuffer("buffer", &buffer));

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;

    if (audio) {
        mAudioQueue.push_back(entry);

        int64_t audioTimeUs;
        buffer->meta()->findInt64("timeUs", &audioTimeUs);

        if ((mHasVideo && mIsFirstVideoframeReceived) || !mHasVideo) {
            postDrainAudioQueue();
            return;
        } else {
            mPendingPostAudioDrains = true;
            DPR_MSG_HIGH("Not rendering Audio Sample with TS: %lld as Video frame is not decoded", audioTimeUs);
        }
    } else {
        mVideoQueue.push_back(entry);

        int64_t videoTimeUs;
        buffer->meta()->findInt64("timeUs", &videoTimeUs);

        if (!mIsFirstVideoframeReceived) {
            mIsFirstVideoframeReceived = true;
            DPR_MSG_HIGH("Received first video Sample with TS: %lld", videoTimeUs);

            if (mPendingPostAudioDrains) {
                mPendingPostAudioDrains = false;
                postDrainAudioQueue();
            }
        }
        postDrainVideoQueue();
    }

    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    DPR_MSG_LOW("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000ll) {
        // Audio data starts more than 0.1 secs before video.
        // Drop some audio.
        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    syncQueuesDone();
}
Example 15: onMessageReceived
void Converter::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatMediaPullerNotify:
        {
            int32_t what;
            CHECK(msg->findInt32("what", &what));

            if (!mIsPCMAudio && mEncoder == NULL) {
                ALOGV("got msg '%s' after encoder shutdown.",
                      msg->debugString().c_str());

                if (what == MediaPuller::kWhatAccessUnit) {
                    sp<ABuffer> accessUnit;
                    CHECK(msg->findBuffer("accessUnit", &accessUnit));

                    accessUnit->setMediaBufferBase(NULL);
                }
                break;
            }

            if (what == MediaPuller::kWhatEOS) {
                mInputBufferQueue.push_back(NULL);

                feedEncoderInputBuffers();
                scheduleDoMoreWork();
            } else {
                CHECK_EQ(what, MediaPuller::kWhatAccessUnit);

                sp<ABuffer> accessUnit;
                CHECK(msg->findBuffer("accessUnit", &accessUnit));

                if (mNumFramesToDrop > 0 || mEncodingSuspended) {
                    if (mNumFramesToDrop > 0) {
                        --mNumFramesToDrop;
                        ALOGI("dropping frame.");
                    }

                    accessUnit->setMediaBufferBase(NULL);
                    break;
                }

#if 0
                MediaBuffer *mbuf =
                    (MediaBuffer *)(accessUnit->getMediaBufferBase());
                if (mbuf != NULL) {
                    ALOGI("queueing mbuf %p", mbuf);
                    mbuf->release();
                }
#endif

#if ENABLE_SILENCE_DETECTION
                if (!mIsVideo) {
                    if (IsSilence(accessUnit)) {
                        if (mInSilentMode) {
                            break;
                        }

                        int64_t nowUs = ALooper::GetNowUs();

                        if (mFirstSilentFrameUs < 0ll) {
                            mFirstSilentFrameUs = nowUs;
                        } else if (nowUs >= mFirstSilentFrameUs + 10000000ll) {
                            mInSilentMode = true;
                            ALOGI("audio in silent mode now.");
                            break;
                        }
                    } else {
                        if (mInSilentMode) {
                            ALOGI("audio no longer in silent mode.");
                        }
                        mInSilentMode = false;
                        mFirstSilentFrameUs = -1ll;
                    }
                }
#endif

                mInputBufferQueue.push_back(accessUnit);

                feedEncoderInputBuffers();
                scheduleDoMoreWork();
            }
            break;
        }

        case kWhatEncoderActivity:
        {
#if 0
            int64_t whenUs;
            if (msg->findInt64("whenUs", &whenUs)) {
                int64_t nowUs = ALooper::GetNowUs();
                ALOGI("[%s] kWhatEncoderActivity after %lld us",
                      mIsVideo ? "video" : "audio", nowUs - whenUs);
            }
#endif

            mDoMoreWorkPending = false;

            if (mEncoder == NULL) {
//......... (part of the code omitted here) .........