本文整理汇总了C++中sp::getFormat方法的典型用法代码示例。如果您正苦于以下问题:C++ sp::getFormat方法的具体用法?C++ sp::getFormat怎么用?C++ sp::getFormat使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sp
的用法示例。
在下文中一共展示了sp::getFormat方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: SimpleDecodingSource
//static
// Creates a SimpleDecodingSource that decodes |source|'s track.
// Looks up the track's MIME type, converts the source format to an AMessage,
// then walks the list of matching decoders (optionally restricted to
// |desiredCodec|) until one configures successfully.
// Returns NULL when the format cannot be converted or no codec works.
sp<SimpleDecodingSource> SimpleDecodingSource::Create(
        const sp<IMediaSource> &source, uint32_t flags, const sp<ANativeWindow> &nativeWindow,
        const char *desiredCodec) {
    sp<Surface> surface = static_cast<Surface*>(nativeWindow.get());
    const char *mime = NULL;
    sp<MetaData> meta = source->getFormat();
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    sp<AMessage> format = new AMessage;
    // Reuse |meta| instead of querying source->getFormat() a second time.
    if (convertMetaDataToMessage(meta, &format) != OK) {
        return NULL;
    }

    Vector<AString> matchingCodecs;
    MediaCodecList::findMatchingCodecs(
            mime, false /* encoder */, flags, &matchingCodecs);

    sp<ALooper> looper = new ALooper;
    looper->setName("stagefright");
    looper->start();

    sp<MediaCodec> codec;
    for (size_t i = 0; i < matchingCodecs.size(); ++i) {
        const AString &componentName = matchingCodecs[i];
        // When a specific codec was requested, skip everything else.
        if (desiredCodec != NULL && componentName.compare(desiredCodec)) {
            continue;
        }

        ALOGV("Attempting to allocate codec '%s'", componentName.c_str());

        codec = MediaCodec::CreateByComponentName(looper, componentName);
        if (codec != NULL) {
            ALOGI("Successfully allocated codec '%s'", componentName.c_str());

            status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
            if (err == OK) {
                // |format| is replaced with the codec's negotiated output format.
                err = codec->getOutputFormat(&format);
            }
            if (err == OK) {
                return new SimpleDecodingSource(codec, source, looper,
                        surface != NULL,
                        strcmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS) == 0,
                        format);
            }

            ALOGD("Failed to configure codec '%s'", componentName.c_str());
            codec->release();
            codec = NULL;
        }
    }

    looper->stop();
    ALOGE("No matching decoder! (mime: %s)", mime);
    return NULL;
}
示例2: addSource
// Registers the single audio track this Ogg writer will mux.
// Only one Vorbis track is supported; returns an error for anything else.
status_t OggWriter::addSource(const sp<MediaSource> &source)
{
    LOGV("OggWriter::addSource");
    if (mInitCheck != OK)
    {
        return mInitCheck;
    }
    if (mSource != NULL)
    {
        // Ogg files only support a single track of audio.
        return UNKNOWN_ERROR;
    }
    sp<MetaData> meta = source->getFormat();
    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));
    if (strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS))
    {
        return ERROR_UNSUPPORTED;
    }
    // Previously the result of findInt32 was ignored, which could leave
    // mSampleRate unset for a malformed track. Fail explicitly instead.
    if (!meta->findInt32(kKeySampleRate, &mSampleRate))
    {
        return BAD_VALUE;
    }
    mSource = source;
    return OK;
}
示例3: Create
// Wraps |source| in an AVC (H.264) MediaCodecSource encoder, copying the
// raw frame geometry and color format out of the source's own metadata.
sp<MediaSource> prepareVideoEncoder(const sp<ALooper>& looper,
                                    const sp<MediaSource>& source) {
    sp<MetaData> srcMeta = source->getFormat();

    int32_t frameWidth;
    int32_t frameHeight;
    int32_t frameStride;
    int32_t frameSliceHeight;
    int32_t frameColorFormat;
    CHECK(srcMeta->findInt32(kKeyWidth, &frameWidth));
    CHECK(srcMeta->findInt32(kKeyHeight, &frameHeight));
    CHECK(srcMeta->findInt32(kKeyStride, &frameStride));
    CHECK(srcMeta->findInt32(kKeySliceHeight, &frameSliceHeight));
    CHECK(srcMeta->findInt32(kKeyColorFormat, &frameColorFormat));

    // Build the encoder's input/output configuration message.
    sp<AMessage> encoderFormat = new AMessage();
    encoderFormat->setInt32("width", frameWidth);
    encoderFormat->setInt32("height", frameHeight);
    encoderFormat->setInt32("stride", frameStride);
    encoderFormat->setInt32("slice-height", frameSliceHeight);
    encoderFormat->setInt32("color-format", frameColorFormat);
    encoderFormat->setString("mime", kMimeTypeAvc);
    encoderFormat->setInt32("bitrate", sVideoBitRateInK * 1024);
    encoderFormat->setInt32("bitrate-mode", OMX_Video_ControlRateVariable);
    encoderFormat->setFloat("frame-rate", sFPS);
    encoderFormat->setInt32("i-frame-interval-ms", sIFrameIntervalMs);

    return MediaCodecSource::Create(
        looper,
        encoderFormat,
        source,
#ifdef TARGET_GE_MARSHMALLOW
        NULL,
#endif
        sUseMetaDataMode ? MediaCodecSource::FLAG_USE_METADATA_INPUT : 0
    );
}
示例4: CHECK
// Synchronously encodes raw PCM audio pulled from |pMediaSource_in| into
// the sink |pAudioSink_out|, honoring the options in |pOption_in|.
// Only MEDIA_MIMETYPE_AUDIO_RAW input is accepted.
// Returns OK on success; BAD_VALUE for null source/sink; INVALID_OPERATION
// for a non-raw input mime; otherwise the prepare/encode error code
// (propagated via the project's CHECK_*/RETURN macros).
int
CEncoderLame::syncEncode(
const sp<IMediaSource>& pMediaSource_in,
const sp<IAudioSink>& pAudioSink_out,
const sp<AMessage>& pOption_in
)
{
AUTO_LOG();
// NOTE(review): CHECK_PTR_EXT appears to early-return BAD_VALUE on a null
// pointer -- macro definition not visible here, confirm.
CHECK_PTR_EXT(pMediaSource_in, BAD_VALUE);
CHECK_PTR_EXT(pAudioSink_out, BAD_VALUE);
sp<MetaData> meta = pMediaSource_in->getFormat();
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
int ret = prepare(pMediaSource_in, pAudioSink_out, pOption_in);
if (OK == ret) {
ret = encode( pMediaSource_in, pAudioSink_out);
}
// finish() always runs, even when prepare/encode failed.
finish();
CHECK_IS_EXT((OK == ret), ret);
} else {
// Input is not raw PCM; LAME cannot encode it.
RETURN(INVALID_OPERATION);
}
RETURN(OK);
}
示例5: addInBandTextSource
// Registers an in-band (embedded in the media file) subtitle track at
// |trackIndex|. Notifies the observer when the track is VobSub so the
// player can adjust rendering. Returns ERROR_UNSUPPORTED for mime types
// no TimedTextSource implementation handles.
status_t TimedTextDriver::addInBandTextSource(
        size_t trackIndex, const sp<MediaSource>& mediaSource) {
    sp<TimedTextSource> textSource =
            TimedTextSource::CreateTimedTextSource(mediaSource);
    if (textSource == NULL) {
        return ERROR_UNSUPPORTED;
    }

    const char *mime;
    uint32_t vobSubFlag = 0;
    bool haveMime = mediaSource->getFormat()->findCString(kKeyMIMEType, &mime);
    if (haveMime) {
        if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_MATROSKA_VOBSUB) == 0) {
            vobSubFlag = 1;
        }
        if (mObserver) {
            // Tell the observer whether this track carries VobSub data.
            mObserver->subtitleNotify(SUBTITLE_MSG_VOBSUB_FLAG, &vobSubFlag);
        }
    }

    Mutex::Autolock autoLock(mLock);
    mTextSourceVector.add(trackIndex, textSource);
    mTextSourceTypeVector.add(TEXT_SOURCE_TYPE_IN_BAND);
    return OK;
}
示例6: if
// static
// Factory: instantiates the TimedTextSource subclass matching |mediaSource|'s
// subtitle mime type. With MTK_SUBTITLE_SUPPORT enabled, additional formats
// (ASS/SSA/VobSub/DVB/TXT) are handled; otherwise only 3GPP timed text.
// Returns NULL for unsupported mime types.
sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
const sp<MediaSource>& mediaSource) {
const char *mime;
CHECK(mediaSource->getFormat()->findCString(kKeyMIMEType, &mime));
ALOGE("[PANDA] CreateTimedTextSource, type = %s\n", mime);
if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0) {
return new TimedText3GPPSource(mediaSource);
}
#ifdef MTK_SUBTITLE_SUPPORT
else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_ASS) == 0) {
return new TimedTextASSSource(mediaSource);
}
else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_SSA) == 0) {
return new TimedTextSSASource(mediaSource);
}
else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_VOBSUB) == 0) {
return new TimedTextVOBSUBSource(mediaSource);
}
else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_DVB) == 0) {
return new TimedTextDVBSource(mediaSource);
}
else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_TXT) == 0) {
return new TimedTextTXTSource(mediaSource);
}
#endif
ALOGE("Unsupported mime type for subtitle. : %s", mime);
return NULL;
}
示例7: addSource
// Registers a track with the WebM writer. At most one VP8 video track and
// one Vorbis audio track are supported, and tracks cannot be added after
// recording has started.
status_t WebmWriter::addSource(const sp<MediaSource> &source) {
    Mutex::Autolock l(mLock);
    if (mStarted) {
        ALOGE("Attempt to add source AFTER recording is started");
        return UNKNOWN_ERROR;
    }
    // At most 2 tracks can be supported.
    if (mStreams[kVideoIndex].mTrackEntry != NULL
            && mStreams[kAudioIndex].mTrackEntry != NULL) {
        ALOGE("Too many tracks (2) to add");
        return ERROR_UNSUPPORTED;
    }
    CHECK(source != NULL);
    // A track of type other than video or audio is not supported.
    // BUGFIX: the return value of findCString was previously ignored, so a
    // track without a mime type would read |mime| uninitialized below.
    const char *mime = NULL;
    if (!source->getFormat()->findCString(kKeyMIMEType, &mime)) {
        ALOGE("Track has no mime type");
        return ERROR_UNSUPPORTED;
    }
    const char *vp8 = MEDIA_MIMETYPE_VIDEO_VP8;
    const char *vorbis = MEDIA_MIMETYPE_AUDIO_VORBIS;
    size_t streamIndex;
    if (!strncasecmp(mime, vp8, strlen(vp8))) {
        streamIndex = kVideoIndex;
    } else if (!strncasecmp(mime, vorbis, strlen(vorbis))) {
        streamIndex = kAudioIndex;
    } else {
        ALOGE("Track (%s) other than %s or %s is not supported", mime, vp8, vorbis);
        return ERROR_UNSUPPORTED;
    }
    // No more than one video or one audio track is supported.
    if (mStreams[streamIndex].mTrackEntry != NULL) {
        ALOGE("%s track already exists", mStreams[streamIndex].mName);
        return ERROR_UNSUPPORTED;
    }
    // This is the first track of either audio or video.
    // Go ahead to add the track.
    mStreams[streamIndex].mSource = source;
    mStreams[streamIndex].mTrackEntry = mStreams[streamIndex].mMakeTrack(source->getFormat());
    return OK;
}
示例8: SetVideoFormat
bool OmxDecoder::SetVideoFormat() {
const char *componentName;
if (!mVideoSource->getFormat()->findInt32(kKeyWidth, &mVideoWidth) ||
!mVideoSource->getFormat()->findInt32(kKeyHeight, &mVideoHeight) ||
!mVideoSource->getFormat()->findCString(kKeyDecoderComponent, &componentName) ||
!mVideoSource->getFormat()->findInt32(kKeyColorFormat, &mVideoColorFormat) ) {
return false;
}
if (!mVideoSource->getFormat()->findInt32(kKeyStride, &mVideoStride)) {
mVideoStride = mVideoWidth;
LOG("stride not available, assuming width");
}
if (!mVideoSource->getFormat()->findInt32(kKeySliceHeight, &mVideoSliceHeight)) {
mVideoSliceHeight = mVideoHeight;
LOG("slice height not available, assuming height");
}
if (!mVideoSource->getFormat()->findInt32(kKeyRotation, &mVideoRotation)) {
mVideoRotation = 0;
LOG("rotation not available, assuming 0");
}
LOG("width: %d height: %d component: %s format: %d stride: %d sliceHeight: %d rotation: %d",
mVideoWidth, mVideoHeight, componentName, mVideoColorFormat,
mVideoStride, mVideoSliceHeight, mVideoRotation);
return true;
}
示例9: CHECK
// Creates an OMX decoder node for |source|'s format via |client|.
// Aborts (CHECK) if no decoder could be instantiated.
sp<MediaSource> OmxJpegImageDecoder::getDecoder(
        OMXClient *client, const sp<MediaSource>& source) {
    sp<MediaSource> decoder = OMXCodec::Create(
            client->interface(),
            source->getFormat(),
            false /* createEncoder */,
            source);
    CHECK(decoder != NULL);
    return decoder;
}
示例10: mOwner
// Track constructor: snapshots the source's format metadata and stores a
// back-pointer to the owning writer. Codec-specific data (e.g. AVC SPS/PPS)
// starts empty and is presumably filled in later during writing.
MPEG4Writer::Track::Track(
MPEG4Writer *owner, const sp<MediaSource> &source)
: mOwner(owner),
mMeta(source->getFormat()),
mSource(source),
mDone(false),
mCodecSpecificData(NULL),
mCodecSpecificDataSize(0),
mReachedEOS(false) {
}
示例11: CHECK
// static
sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
const sp<MediaSource>& mediaSource) {
const char *mime;
CHECK(mediaSource->getFormat()->findCString(kKeyMIMEType, &mime));
if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0) {
return new TimedText3GPPSource(mediaSource);
}
ALOGE("Unsupported mime type for subtitle. : %s", mime);
return NULL;
}
示例12: performSeekTest
// Seek-accuracy test: seeks to every 60 ms mark across the track's duration
// (using SEEK_PREVIOUS_SYNC) and prints, per seek, the requested time, the
// actual timestamp of the first non-empty buffer, and the delta.
static void performSeekTest(const sp<MediaSource> &source) {
CHECK_EQ((status_t)OK, source->start());
int64_t durationUs;
CHECK(source->getFormat()->findInt64(kKeyDuration, &durationUs));
for (int64_t seekTimeUs = 0; seekTimeUs <= durationUs;
seekTimeUs += 60000ll) {
MediaSource::ReadOptions options;
options.setSeekTo(
seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
MediaBuffer *buffer;
status_t err;
for (;;) {
err = source->read(&buffer, &options);
// Clear the seek request so retries below read sequentially
// instead of re-seeking.
options.clearSeekTo();
if (err == INFO_FORMAT_CHANGED) {
// Format change delivers no buffer; just read again.
CHECK(buffer == NULL);
continue;
}
if (err != OK) {
CHECK(buffer == NULL);
break;
}
if (buffer->range_length() > 0) {
// Got a real sample; leave the retry loop.
break;
}
// Empty buffer: release it and keep reading.
CHECK(buffer != NULL);
buffer->release();
buffer = NULL;
}
if (err == OK) {
int64_t timeUs;
CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
// Columns: requested seek time, actual sample time, difference.
printf("%lld\t%lld\t%lld\n", seekTimeUs, timeUs, seekTimeUs - timeUs);
buffer->release();
buffer = NULL;
} else {
printf("ERROR\n");
break;
}
}
CHECK_EQ((status_t)OK, source->stop());
}
示例13:
// Returns the wrapped stream's format, overriding the duration key with
// the HLS live session's duration when one is available.
sp<MetaData> MPEG2TSSource::getFormat() {
    sp<MetaData> trackMeta = mImpl->getFormat();
    if (mExtractor->mLiveSession != NULL) {
        int64_t durationUs;
        if (mExtractor->mLiveSession->getDuration(&durationUs) == OK) {
            trackMeta->setInt64(kKeyDuration, durationUs);
        }
    }
    return trackMeta;
}
示例14: SetAudioFormat
bool OmxDecoder::SetAudioFormat() {
// If the format changed, update our cached info.
if (!mAudioSource->getFormat()->findInt32(kKeyChannelCount, &mAudioChannels) ||
!mAudioSource->getFormat()->findInt32(kKeySampleRate, &mAudioSampleRate)) {
return false;
}
LOG("channelCount: %d sampleRate: %d",
mAudioChannels, mAudioSampleRate);
return true;
}
示例15:
// Returns the wrapped stream's format unchanged.
// The duration override for live sources is intentionally disabled (see
// comment below); the unused durationUs declaration has been folded into
// the disabled block to silence the unused-variable warning.
sp<MetaData> MPEG2TSSource::getFormat() {
    sp<MetaData> meta = mImpl->getFormat();
    /* no duration override for live source -- add by Hadwin
    int64_t durationUs;
    if (mExtractor->mLiveSource != NULL
            && mExtractor->mLiveSource->getDuration(&durationUs)) {
        meta->setInt64(kKeyDuration, durationUs);
    }
    */
    return meta;
}