This article collects typical usage examples of the C++ sp::findData method. If you have been wondering how to use sp::findData in C++, how it works, or what real calls to it look like, the hand-picked code examples below should help. You can also explore further usage examples of the class sp in which the method is defined.
The 7 code examples of sp::findData shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
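Across all of the examples the calling convention is the same: findData takes a metadata key plus three out-parameters (a type tag, a pointer into storage owned by the MetaData object, and a byte count) and returns true only when the key is present. Here is a minimal sketch of that pattern, assuming the usual stagefright headers are available; kKeySomeBlob and copyBlob are placeholders used only for illustration, not names from the original sources.

// Minimal sketch of the findData out-parameter pattern.
// Assumes <media/stagefright/MetaData.h> and <vector>; kKeySomeBlob is hypothetical.
static bool copyBlob(const sp<MetaData>& meta, std::vector<uint8_t>* out) {
    uint32_t type = 0;          // type tag set by whoever wrote the key (often unreliable)
    const void* data = nullptr; // points into memory owned by 'meta'
    size_t size = 0;
    if (!meta->findData(kKeySomeBlob, &type, &data, &size)) {
        return false;           // key not present in this MetaData
    }
    // Copy the bytes out if they must outlive 'meta'.
    const uint8_t* bytes = static_cast<const uint8_t*>(data);
    out->assign(bytes, bytes + size);
    return true;
}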
Example 1: reader
bool
CryptoFile::DoUpdate(sp<MetaData>& aMetaData)
{
  const void* data;
  size_t size;
  uint32_t type;
  // There's no point in checking that the type matches anything because it
  // isn't set consistently in the MPEG4Extractor.
  if (!aMetaData->findData(kKeyPssh, &type, &data, &size)) {
    return false;
  }
  ByteReader reader(reinterpret_cast<const uint8_t*>(data), size);
  while (reader.Remaining()) {
    PsshInfo psshInfo;
    if (!reader.ReadArray(psshInfo.uuid, 16)) {
      return false;
    }
    if (!reader.CanReadType<uint32_t>()) {
      return false;
    }
    auto length = reader.ReadType<uint32_t>();
    if (!reader.ReadArray(psshInfo.data, length)) {
      return false;
    }
    pssh.AppendElement(psshInfo);
  }
  return true;
}
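For reference, each PsshInfo record populated by the loop above is essentially a 16-byte system UUID followed by an opaque, length-prefixed payload. A rough sketch of the shape this code assumes is shown below; the actual declaration lives in Gecko's mp4_demuxer headers and may differ in detail.

// Rough sketch (not the real Gecko declaration) of what the loop fills in:
struct PsshInfoSketch {
    nsTArray<uint8_t> uuid;   // 16-byte system ID, read first
    nsTArray<uint8_t> data;   // payload whose length is read as a uint32_t just before it
};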
Example 2: getAacCodecSpecificData
void ExtendedCodec::getAacCodecSpecificData(
        const sp<MetaData> &meta, const void* &data, size_t &size) {
    uint32_t type = 0;
    size = 0;
    if (meta->findData(kKeyAacCodecSpecificData, &type, &data, &size)) {
        ALOGV("OMXCodec::configureCodec found kKeyAacCodecSpecificData of size %zu\n", size);
    }
}
Example 3: CHECK
sp<AMessage> DashPlayer::Decoder::makeFormat(const sp<MetaData> &meta) {
    CHECK(mCSD.isEmpty());
    sp<AMessage> msg;
    uint32_t type;
    const void *data;
    size_t size;
    CHECK_EQ(convertMetaDataToMessage(meta, &msg), (status_t)OK);

    int32_t value;
    if (meta->findInt32(kKeySmoothStreaming, &value)) {
        msg->setInt32("smooth-streaming", value);
    }
    if (meta->findInt32(kKeyIsDRM, &value)) {
        msg->setInt32("secure-op", 1);
    }
    if (meta->findInt32(kKeyRequiresSecureBuffers, &value)) {
        msg->setInt32("requires-secure-buffers", 1);
    }
    if (meta->findInt32(kKeyEnableDecodeOrder, &value)) {
        msg->setInt32("decodeOrderEnable", value);
    }
    if (meta->findData(kKeyAacCodecSpecificData, &type, &data, &size)) {
        if (size > 0 && data != NULL) {
            sp<ABuffer> buffer = new ABuffer(size);
            if (buffer != NULL) {
                memcpy(buffer->data(), data, size);
                buffer->meta()->setInt32("csd", true);
                buffer->meta()->setInt64("timeUs", 0);
                msg->setBuffer("csd-0", buffer);
            } else {
                ALOGE("kKeyAacCodecSpecificData ABuffer Allocation failed");
            }
        } else {
            ALOGE("Not a valid data pointer or size == 0");
        }
    }

    mCSDIndex = 0;
    for (size_t i = 0;; ++i) {
        sp<ABuffer> csd;
        if (!msg->findBuffer(StringPrintf("csd-%d", i).c_str(), &csd)) {
            break;
        }
        mCSD.push(csd);
    }
    return msg;
}
Example 4: displayAVCProfileLevelIfPossible
static void displayAVCProfileLevelIfPossible(const sp<MetaData>& meta) {
    uint32_t type;
    const void *data;
    size_t size;
    if (meta->findData(kKeyAVCC, &type, &data, &size)) {
        const uint8_t *ptr = (const uint8_t *)data;
        CHECK(size >= 7);
        CHECK(ptr[0] == 1);  // configurationVersion == 1
        uint8_t profile = ptr[1];
        uint8_t level = ptr[3];
        fprintf(stderr, "AVC video profile %d and level %d\n", profile, level);
    }
}
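The fixed offsets in this example follow the AVCDecoderConfigurationRecord layout from ISO/IEC 14496-15: byte 0 is configurationVersion (always 1), byte 1 is AVCProfileIndication, byte 2 holds the profile_compatibility flags, and byte 3 is AVCLevelIndication. The small helper below is a sketch that mirrors those offsets; the function name is illustrative, not part of the original source.

// Sketch: extract profile/level from an avcC blob, mirroring the offsets above.
static bool parseAvcCProfileLevel(const uint8_t* avcc, size_t size,
                                  uint8_t* profile, uint8_t* level) {
    if (size < 7 || avcc[0] != 1) {   // configurationVersion must be 1
        return false;
    }
    *profile = avcc[1];               // AVCProfileIndication
    // avcc[2] is profile_compatibility; avcc[3] is AVCLevelIndication.
    *level = avcc[3];
    return true;
}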
Example 5: sizeof
// static
sp<WebmElement> WebmWriter::audioTrack(const sp<MetaData>& md) {
    int32_t nChannels, samplerate;
    uint32_t type;
    const void *headerData1;
    const char headerData2[] = { 3, 'v', 'o', 'r', 'b', 'i', 's', 7, 0, 0, 0,
            'a', 'n', 'd', 'r', 'o', 'i', 'd', 0, 0, 0, 0, 1 };
    const void *headerData3;
    size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;

    CHECK(md->findInt32(kKeyChannelCount, &nChannels));
    CHECK(md->findInt32(kKeySampleRate, &samplerate));
    CHECK(md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1));
    CHECK(md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3));

    size_t codecPrivateSize = 1;
    codecPrivateSize += XiphLaceCodeLen(headerSize1);
    codecPrivateSize += XiphLaceCodeLen(headerSize2);
    codecPrivateSize += headerSize1 + headerSize2 + headerSize3;

    off_t off = 0;
    sp<ABuffer> codecPrivateBuf = new ABuffer(codecPrivateSize);
    uint8_t *codecPrivateData = codecPrivateBuf->data();
    codecPrivateData[off++] = 2;

    off += XiphLaceEnc(codecPrivateData + off, headerSize1);
    off += XiphLaceEnc(codecPrivateData + off, headerSize2);

    memcpy(codecPrivateData + off, headerData1, headerSize1);
    off += headerSize1;
    memcpy(codecPrivateData + off, headerData2, headerSize2);
    off += headerSize2;
    memcpy(codecPrivateData + off, headerData3, headerSize3);

    sp<WebmElement> entry = WebmElement::AudioTrackEntry(
            nChannels,
            samplerate,
            codecPrivateBuf);
    return entry;
}
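The CodecPrivate assembled here is the standard Xiph-laced Vorbis header triple used by WebM: a count byte (2, meaning three headers), the laced sizes of the first two headers, and then the three headers back to back (the last size is implicit). Xiph lacing encodes a length as a run of 0xFF bytes plus a final remainder byte. The sketch below shows how the two helpers used above behave; the "Sketch" names are illustrative and the real implementations live in WebmWriter.cpp.

// Sketch of Xiph lacing size encoding, as assumed by the code above:
// a length N is written as floor(N / 255) bytes of 0xFF followed by N % 255.
static size_t XiphLaceCodeLenSketch(size_t size) {
    return size / 0xff + 1;                   // bytes the lace code will occupy
}

static size_t XiphLaceEncSketch(uint8_t* buf, size_t size) {
    size_t i = 0;
    for (; size >= 0xff; ++i, size -= 0xff) {
        buf[i] = 0xff;                        // full lace bytes
    }
    buf[i++] = static_cast<uint8_t>(size);    // remainder (< 255)
    return i;                                 // number of bytes written
}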
Example 6: sizeof
template <typename T>
static bool
FindData(sp<MetaData>& aMetaData, uint32_t aKey, nsTArray<T>* aDest)
{
  const void* data;
  size_t size;
  uint32_t type;
  aDest->Clear();
  // There's no point in checking that the type matches anything because it
  // isn't set consistently in the MPEG4Extractor.
  if (!aMetaData->findData(aKey, &type, &data, &size) || size % sizeof(T)) {
    return false;
  }
  aDest->AppendElements(reinterpret_cast<const T*>(data), size / sizeof(T));
  return true;
}
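A hypothetical call site is shown below, just to illustrate how the template deduces T from the destination array and rejects payloads whose size is not a multiple of sizeof(T). The member name mExtraData and the choice of kKeyAVCC are assumptions for illustration, not taken from the surrounding source.

// Hypothetical usage of the helper above.
nsTArray<uint8_t> mExtraData;
if (FindData(aMetaData, kKeyAVCC, &mExtraData)) {
  // mExtraData now holds a copy of the avcC blob; with T = uint8_t the
  // size % sizeof(T) check can never fail.
}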
Example 7: WebmMaster
sp<WebmElement> WebmElement::VideoTrackEntry(
        const char *codec,
        uint64_t width,
        uint64_t height,
        const sp<MetaData> &meta,
        uint64_t uid,
        bool lacing,
        const char *lang) {
    if (uid == 0) {
        uid = kVideoTrackNum;
    }

    List<sp<WebmElement> > trackEntryFields;
    populateCommonTrackEntries(
            kVideoTrackNum,
            uid,
            lacing,
            lang,
            codec,
            kVideoType,
            trackEntryFields);

    // CSD
    uint32_t type;
    const void *data;
    size_t size;
    if (meta->findData(kKeyVp9CodecPrivate, &type, &data, &size)) {
        sp<ABuffer> buf = new ABuffer((void *)data, size);  // note: buf does not own data
        trackEntryFields.push_back(new WebmBinary(kMkvCodecPrivate, buf));
    }

    List<sp<WebmElement> > videoInfo;
    videoInfo.push_back(new WebmUnsigned(kMkvPixelWidth, width));
    videoInfo.push_back(new WebmUnsigned(kMkvPixelHeight, height));

    // Color aspects
    {
        List<sp<WebmElement> > colorInfo;
        ColorAspects aspects;
        aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
        aspects.mTransfer = ColorAspects::TransferUnspecified;
        aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
        aspects.mRange = ColorAspects::RangeUnspecified;
        bool havePrimaries = meta->findInt32(kKeyColorPrimaries, (int32_t*)&aspects.mPrimaries);
        bool haveTransfer = meta->findInt32(kKeyTransferFunction, (int32_t*)&aspects.mTransfer);
        bool haveCoeffs = meta->findInt32(kKeyColorMatrix, (int32_t*)&aspects.mMatrixCoeffs);
        bool haveRange = meta->findInt32(kKeyColorRange, (int32_t*)&aspects.mRange);

        int32_t primaries, transfer, coeffs;
        bool fullRange;
        ColorUtils::convertCodecColorAspectsToIsoAspects(
                aspects, &primaries, &transfer, &coeffs, &fullRange);
        if (havePrimaries) {
            colorInfo.push_back(new WebmUnsigned(kMkvPrimaries, primaries));
        }
        if (haveTransfer) {
            colorInfo.push_back(new WebmUnsigned(kMkvTransferCharacteristics, transfer));
        }
        if (haveCoeffs) {
            colorInfo.push_back(new WebmUnsigned(kMkvMatrixCoefficients, coeffs));
        }
        if (haveRange) {
            colorInfo.push_back(new WebmUnsigned(kMkvRange, fullRange ? 2 : 1));
        }

        // Also add HDR static info, some of which goes to MasteringMetadata element
        const HDRStaticInfo *info;
        uint32_t type;
        const void *data;
        size_t size;
        if (meta->findData(kKeyHdrStaticInfo, &type, &data, &size)
                && type == 'hdrS' && size == sizeof(*info)) {
            info = (const HDRStaticInfo*)data;
            if (info->mID == HDRStaticInfo::kType1) {
                List<sp<WebmElement> > masteringInfo;

                // convert HDRStaticInfo values to matroska equivalent values for each non-0 group
                if (info->sType1.mMaxFrameAverageLightLevel) {
                    colorInfo.push_back(new WebmUnsigned(
                            kMkvMaxFALL, info->sType1.mMaxFrameAverageLightLevel));
                }
                if (info->sType1.mMaxContentLightLevel) {
                    colorInfo.push_back(new WebmUnsigned(
                            kMkvMaxCLL, info->sType1.mMaxContentLightLevel));
                }
                if (info->sType1.mMinDisplayLuminance) {
                    // HDRStaticInfo Type1 stores min luminance scaled 10000:1
                    masteringInfo.push_back(new WebmFloat(
                            kMkvLuminanceMin, info->sType1.mMinDisplayLuminance * 0.0001));
                }
                if (info->sType1.mMaxDisplayLuminance) {
                    masteringInfo.push_back(new WebmFloat(
                            kMkvLuminanceMax, (float)info->sType1.mMaxDisplayLuminance));
                }

                // HDRStaticInfo Type1 stores primaries scaled 50000:1
                if (info->sType1.mW.x || info->sType1.mW.y) {
                    masteringInfo.push_back(new WebmFloat(
                            kMkvWhitePointChromaticityX, info->sType1.mW.x * 0.00002));
//......... part of the code is omitted here .........