本文整理汇总了C++中sp::findInt64方法的典型用法代码示例。如果您正苦于以下问题:C++ sp::findInt64方法的具体用法?C++ sp::findInt64怎么用?C++ sp::findInt64使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sp
的用法示例。
在下文中一共展示了sp::findInt64方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: autoLock
void NuCachedSource2::onRead(const sp<AMessage> &msg) {
ALOGV("onRead");
int64_t offset;
CHECK(msg->findInt64("offset", &offset));
void *data;
CHECK(msg->findPointer("data", &data));
size_t size;
CHECK(msg->findSize("size", &size));
ssize_t result = readInternal(offset, data, size);
if (result == -EAGAIN) {
msg->post(50000);
return;
}
Mutex::Autolock autoLock(mLock);
if (mDisconnecting) {
mCondition.signal();
return;
}
CHECK(mAsyncResult == NULL);
mAsyncResult = new AMessage;
mAsyncResult->setInt32("result", result);
mCondition.signal();
}
示例2: onDecoderNotify
void DirectRenderer::onDecoderNotify(const sp<AMessage> &msg) {
size_t trackIndex;
CHECK(msg->findSize("trackIndex", &trackIndex));
int32_t what;
CHECK(msg->findInt32("what", &what));
switch (what) {
case DecoderContext::kWhatOutputBufferReady:
{
size_t index;
CHECK(msg->findSize("index", &index));
int64_t timeUs;
CHECK(msg->findInt64("timeUs", &timeUs));
sp<ABuffer> buffer;
CHECK(msg->findBuffer("buffer", &buffer));
queueOutputBuffer(trackIndex, index, timeUs, buffer);
break;
}
default:
TRESPASS();
}
}
示例3:
// Looks up an int64 entry in |metaData| by |key|.
//
// Returns the stored value, or 0 if no entry with that key exists.
// NOTE: a genuinely stored value of 0 is indistinguishable from
// "key absent" for callers of this helper.
//
// (Parameters renamed from the misleading member-style "mMetaData"/
// "mKey" — the m-prefix is reserved for data members elsewhere in
// this codebase.)
static int64_t
FindInt64(sp<MetaData>& metaData, uint32_t key)
{
    int64_t value;
    if (!metaData->findInt64(key, &value))
        return 0;
    return value;
}
示例4: onMessageReceived
//.........这里部分代码省略.........
{
int32_t what;
CHECK(msg->findInt32("what", &what));
if (what == Renderer::kWhatEOS) {
int32_t audio;
CHECK(msg->findInt32("audio", &audio));
int32_t finalResult;
CHECK(msg->findInt32("finalResult", &finalResult));
if (audio) {
mAudioEOS = true;
} else {
mVideoEOS = true;
}
if (finalResult == ERROR_END_OF_STREAM) {
ALOGV("reached %s EOS", audio ? "audio" : "video");
} else {
ALOGE("%s track encountered an error (%d)",
audio ? "audio" : "video", finalResult);
notifyListener(
MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, finalResult);
}
if ((mAudioEOS || mAudioDecoder == NULL)
&& (mVideoEOS || mVideoDecoder == NULL)) {
notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0);
}
} else if (what == Renderer::kWhatPosition) {
int64_t positionUs;
CHECK(msg->findInt64("positionUs", &positionUs));
CHECK(msg->findInt64("videoLateByUs", &mVideoLateByUs));
if (mDriver != NULL) {
sp<NuPlayerDriver> driver = mDriver.promote();
if (driver != NULL) {
driver->notifyPosition(positionUs);
driver->notifyFrameStats(
mNumFramesTotal, mNumFramesDropped);
}
}
} else if (what == Renderer::kWhatFlushComplete) {
int32_t audio;
CHECK(msg->findInt32("audio", &audio));
ALOGV("renderer %s flush completed.", audio ? "audio" : "video");
} else if (what == Renderer::kWhatVideoRenderingStart) {
notifyListener(MEDIA_INFO, MEDIA_INFO_RENDERING_START, 0);
} else if (what == Renderer::kWhatMediaRenderingStart) {
ALOGV("media rendering started");
notifyListener(MEDIA_STARTED, 0, 0);
}
break;
}
case kWhatMoreDataQueued:
{
break;
}
case kWhatReset:
示例5: ConvertMessageToMap
status_t ConvertMessageToMap(
JNIEnv *env, const sp<AMessage> &msg, jobject *map) {
ScopedLocalRef<jclass> hashMapClazz(
env, env->FindClass("java/util/HashMap"));
if (hashMapClazz.get() == NULL) {
return -EINVAL;
}
jmethodID hashMapConstructID =
env->GetMethodID(hashMapClazz.get(), "<init>", "()V");
if (hashMapConstructID == NULL) {
return -EINVAL;
}
jmethodID hashMapPutID =
env->GetMethodID(
hashMapClazz.get(),
"put",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
if (hashMapPutID == NULL) {
return -EINVAL;
}
jobject hashMap = env->NewObject(hashMapClazz.get(), hashMapConstructID);
for (size_t i = 0; i < msg->countEntries(); ++i) {
AMessage::Type valueType;
const char *key = msg->getEntryNameAt(i, &valueType);
jobject valueObj = NULL;
switch (valueType) {
case AMessage::kTypeInt32:
{
int32_t val;
CHECK(msg->findInt32(key, &val));
valueObj = makeIntegerObject(env, val);
break;
}
case AMessage::kTypeInt64:
{
int64_t val;
CHECK(msg->findInt64(key, &val));
valueObj = makeLongObject(env, val);
break;
}
case AMessage::kTypeFloat:
{
float val;
CHECK(msg->findFloat(key, &val));
valueObj = makeFloatObject(env, val);
break;
}
case AMessage::kTypeString:
{
AString val;
CHECK(msg->findString(key, &val));
valueObj = env->NewStringUTF(val.c_str());
break;
}
case AMessage::kTypeBuffer:
{
sp<ABuffer> buffer;
CHECK(msg->findBuffer(key, &buffer));
valueObj = makeByteBufferObject(
env, buffer->data(), buffer->size());
break;
}
case AMessage::kTypeRect:
{
int32_t left, top, right, bottom;
CHECK(msg->findRect(key, &left, &top, &right, &bottom));
SetMapInt32(
env,
hashMap,
hashMapPutID,
StringPrintf("%s-left", key).c_str(),
left);
SetMapInt32(
env,
hashMap,
hashMapPutID,
StringPrintf("%s-top", key).c_str(),
top);
//.........这里部分代码省略.........
示例6: onMessageReceived
void RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
if (msg->what() == kWhatDisconnect) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
mDisconnectReplyID = replyID;
finishDisconnectIfPossible();
return;
} else if (msg->what() == kWhatPerformSeek) {
int32_t generation;
CHECK(msg->findInt32("generation", &generation));
if (generation != mSeekGeneration) {
// obsolete.
return;
}
int64_t seekTimeUs;
CHECK(msg->findInt64("timeUs", &seekTimeUs));
performSeek(seekTimeUs);
return;
} else if (msg->what() == kWhatPerformPlay) {
int64_t playTimeUs;
CHECK(msg->findInt64("timeUs", &playTimeUs));
performPlay(playTimeUs);
return;
} else if (msg->what() == kWhatPerformPause) {
performPause();
return;
} else if (msg->what() == kWhatPerformResume) {
performResume();
return;
} else if (msg->what() == kWhatPerformSuspend) {
performSuspend();
return;
}
CHECK_EQ(msg->what(), (uint32_t)kWhatNotify);
int32_t what;
int32_t isSeekable = 0;
CHECK(msg->findInt32("what", &what));
switch (what) {
case RtspConnectionHandler::kWhatConnected:
CHECK(msg->findInt32("isSeekable", &isSeekable));
onConnected((isSeekable ? true:false));
break;
case RtspConnectionHandler::kWhatDisconnected:
onDisconnected(msg);
break;
case RtspConnectionHandler::kWhatSeekDone:
{
mState = PLAYING;
// Even if we have reset mLatestPausedUnit in performSeek(),
// it's still possible that kWhatPausedDone event may arrive
// because of previous performPause() command.
for (size_t i = 0; i < mTracks.size(); ++i) {
TrackInfo *info = &mTracks.editItemAt(i);
info->mLatestPausedUnit = 0;
}
mLatestPausedUnit = 0;
break;
}
case RtspConnectionHandler::kWhatPausedDone:
{
for (size_t i = 0; i < mTracks.size(); ++i) {
TrackInfo *info = &mTracks.editItemAt(i);
info->mLatestPausedUnit = info->mLatestReceivedUnit;
}
// The timestamp after a 'Pause' is done is the earliest
// timestamp among all of the latest received units.
TrackInfo *info = &mTracks.editItemAt(0);
mLatestPausedUnit = info->mLatestReceivedUnit;
for (size_t i = 1; i < mTracks.size(); ++i) {
TrackInfo *info = &mTracks.editItemAt(i);
if (mLatestPausedUnit > info->mLatestReceivedUnit) {
mLatestPausedUnit = info->mLatestReceivedUnit;
}
}
break;
}
case RtspConnectionHandler::kWhatAccessUnit:
{
size_t trackIndex;
CHECK(msg->findSize("trackIndex", &trackIndex));
CHECK_LT(trackIndex, mTracks.size());
sp<RefBase> obj;
CHECK(msg->findObject("accessUnit", &obj));
sp<ABuffer> accessUnit = static_cast<ABuffer *>(obj.get());
int32_t damaged;
//.........这里部分代码省略.........
示例7: read
status_t APESource::read(
MediaBuffer **out, const ReadOptions *options)
{
*out = NULL;
uint32_t newframe = 0 , firstbyte = 0;
///LOGV("APESource::read");
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
int32_t bitrate = 0;
if (!mMeta->findInt32(kKeyBitRate, &bitrate)
|| !mMeta->findInt32(kKeySampleRate, &mSampleRate))
{
LOGI("no bitrate");
return ERROR_UNSUPPORTED;
}
if (options != NULL && options->getSeekTo(&seekTimeUs, &mode))
{
{
int64_t duration = 0;
int64_t seektabletime = 0;
if ((mTotalsample > 0) && (mTableOfContents[0] > 0) && (mSamplesPerFrame > 0)
&& mMeta->findInt64(kKeyDuration, &duration))
{
ape_parser_ctx_t ape_ctx;
uint32_t filepos, blocks_to_skip;
ape_ctx.samplerate = mSampleRate;
ape_ctx.blocksperframe = mSamplesPerFrame;
ape_ctx.totalframes = mTotalFrame;
ape_ctx.seektable = mTableOfContents;
ape_ctx.firstframe = mTableOfContents[0];
if (ape_calc_seekpos_by_microsecond(&ape_ctx,
seekTimeUs,
&newframe,
&filepos,
&firstbyte,
&blocks_to_skip) < 0)
{
LOGD("getseekto error exit");
return ERROR_UNSUPPORTED;
}
mCurrentPos = filepos;
mCurrentTimeUs = (int64_t)newframe * mSamplesPerFrame * 1000000ll / mSampleRate;
LOGD("getseekto seekTimeUs=%lld, Actual time%lld, filepos%x,frame %d, seekbyte %d", seekTimeUs, mCurrentTimeUs, mCurrentPos, newframe, firstbyte);
}
else
{
LOGD("getseekto parameter error exit");
return ERROR_UNSUPPORTED;
}
}
}
if ((mFileoffset != 0)
&& (mCurrentPos >= mFileoffset))
{
LOGD("APESource::readAt to end filesize %x curr: %x", mFileoffset, mCurrentPos);
return ERROR_END_OF_STREAM;
}
MediaBuffer *buffer;
status_t err = mGroup->acquire_buffer(&buffer);
if (err != OK)
{
LOGD("APESource::acquire_buffer fail");
return err;
}
size_t frame_size;
frame_size = kMaxFrameSize;
ssize_t n = 0;
#ifdef ENABLE_MMRIOTHREAD
if (options != NULL && options->getSeekTo(&seekTimeUs, &mode))
{
ResetReadioPtr(mCurrentPos);
}
n = ReadBitsteam(buffer->data(), frame_size);
#else
///frame_size = mMaxBufferSize;
n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);
#endif
///LOGE("APESource::readAt %x, %x, %d, %d, %d, %d, %d", mCurrentPos, buffer->data(), buffer->size(), mTotalsample, bitrate, mSampleRate, frame_size);
//ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);
//.........这里部分代码省略.........
示例8: mInitCheck
// Constructs an MP3 extractor over |source|.
//
// If |meta| carries the sniffer's results ("offset", "header",
// "post-id3-offset") they are accepted as-is; otherwise the stream is
// re-scanned via Resync(). On failure mInitCheck remains NO_INIT so
// the extractor reports itself as unusable.
MP3Extractor::MP3Extractor(
        const sp<DataSource> &source, const sp<AMessage> &meta)
    : mInitCheck(NO_INIT),
      mDataSource(source),
      mFirstFramePos(-1),
      mFixedHeader(0) {
    off64_t pos = 0;
    off64_t post_id3_pos;
    uint32_t header;
    bool success;

    int64_t meta_offset;
    uint32_t meta_header;
    int64_t meta_post_id3_offset;
    if (meta != NULL
            && meta->findInt64("offset", &meta_offset)
            && meta->findInt32("header", (int32_t *)&meta_header)
            && meta->findInt64("post-id3-offset", &meta_post_id3_offset)) {
        // The sniffer has already done all the hard work for us, simply
        // accept its judgement.
        pos = (off64_t)meta_offset;
        header = meta_header;
        post_id3_pos = (off64_t)meta_post_id3_offset;
        success = true;
    } else {
        success = Resync(mDataSource, 0, &pos, &post_id3_pos, &header);
    }

    if (!success) {
        // mInitCheck will remain NO_INIT
        return;
    }

    mFirstFramePos = pos;
    mFixedHeader = header;

    size_t frame_size;
    int sample_rate;
    int num_channels;
    int bitrate;
    GetMPEGAudioFrameSize(
            header, &frame_size, &sample_rate, &num_channels, &bitrate);

    mMeta = new MetaData;
    mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
    mMeta->setInt32(kKeySampleRate, sample_rate);
    mMeta->setInt32(kKeyBitRate, bitrate * 1000);
    mMeta->setInt32(kKeyChannelCount, num_channels);

    // Prefer a XING header for duration/seek info, fall back to VBRI.
    mSeeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);
    if (mSeeker == NULL) {
        mSeeker = VBRISeeker::CreateFromSource(mDataSource, post_id3_pos);
    }

    int64_t durationUs;
    if (mSeeker == NULL || !mSeeker->getDuration(&durationUs)) {
        off64_t fileSize;
        // Guard against bitrate == 0 (possible for a malformed frame
        // header) — the original code would divide by zero here.
        if (mDataSource->getSize(&fileSize) == OK && bitrate != 0) {
            // Estimate duration from file size and CBR bitrate (kbps):
            // bytes * 8 bits / (bitrate * 1000) seconds, in microseconds.
            durationUs = 8000LL * (fileSize - mFirstFramePos) / bitrate;
        } else {
            durationUs = -1;
        }
    }

    if (durationUs >= 0) {
        mMeta->setInt64(kKeyDuration, durationUs);
    }

    mInitCheck = OK;
}
示例9: onMessageReceived
void Converter::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatMediaPullerNotify:
{
int32_t what;
CHECK(msg->findInt32("what", &what));
if (!mIsPCMAudio && mEncoder == NULL) {
ALOGV("got msg '%s' after encoder shutdown.",
msg->debugString().c_str());
if (what == MediaPuller::kWhatAccessUnit) {
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
accessUnit->setMediaBufferBase(NULL);
}
break;
}
if (what == MediaPuller::kWhatEOS) {
mInputBufferQueue.push_back(NULL);
feedEncoderInputBuffers();
scheduleDoMoreWork();
} else {
CHECK_EQ(what, MediaPuller::kWhatAccessUnit);
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
if (mNumFramesToDrop > 0 || mEncodingSuspended) {
if (mNumFramesToDrop > 0) {
--mNumFramesToDrop;
ALOGI("dropping frame.");
}
accessUnit->setMediaBufferBase(NULL);
break;
}
#if 0
MediaBuffer *mbuf =
(MediaBuffer *)(accessUnit->getMediaBufferBase());
if (mbuf != NULL) {
ALOGI("queueing mbuf %p", mbuf);
mbuf->release();
}
#endif
#if ENABLE_SILENCE_DETECTION
if (!mIsVideo) {
if (IsSilence(accessUnit)) {
if (mInSilentMode) {
break;
}
int64_t nowUs = ALooper::GetNowUs();
if (mFirstSilentFrameUs < 0ll) {
mFirstSilentFrameUs = nowUs;
} else if (nowUs >= mFirstSilentFrameUs + 10000000ll) {
mInSilentMode = true;
ALOGI("audio in silent mode now.");
break;
}
} else {
if (mInSilentMode) {
ALOGI("audio no longer in silent mode.");
}
mInSilentMode = false;
mFirstSilentFrameUs = -1ll;
}
}
#endif
mInputBufferQueue.push_back(accessUnit);
feedEncoderInputBuffers();
scheduleDoMoreWork();
}
break;
}
case kWhatEncoderActivity:
{
#if 0
int64_t whenUs;
if (msg->findInt64("whenUs", &whenUs)) {
int64_t nowUs = ALooper::GetNowUs();
ALOGI("[%s] kWhatEncoderActivity after %lld us",
mIsVideo ? "video" : "audio", nowUs - whenUs);
}
#endif
mDoMoreWorkPending = false;
if (mEncoder == NULL) {
//.........这里部分代码省略.........
示例10: CHECK
void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
if (msg->what() == kWhatDisconnect) {
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
mDisconnectReplyID = replyID;
finishDisconnectIfPossible();
return;
} else if (msg->what() == kWhatPerformSeek) {
int32_t generation;
CHECK(msg->findInt32("generation", &generation));
CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
if (generation != mSeekGeneration) {
// obsolete.
finishSeek(OK);
return;
}
int64_t seekTimeUs;
CHECK(msg->findInt64("timeUs", &seekTimeUs));
performSeek(seekTimeUs);
return;
} else if (msg->what() == kWhatPollBuffering) {
onPollBuffering();
return;
} else if (msg->what() == kWhatSignalEOS) {
onSignalEOS(msg);
return;
}
CHECK_EQ(msg->what(), (int)kWhatNotify);
int32_t what;
CHECK(msg->findInt32("what", &what));
switch (what) {
case MyHandler::kWhatConnected:
{
onConnected();
notifyVideoSizeChanged();
uint32_t flags = 0;
if (mHandler->isSeekable()) {
flags = FLAG_CAN_PAUSE
| FLAG_CAN_SEEK
| FLAG_CAN_SEEK_BACKWARD
| FLAG_CAN_SEEK_FORWARD;
}
notifyFlagsChanged(flags);
schedulePollBuffering();
break;
}
case MyHandler::kWhatDisconnected:
{
onDisconnected(msg);
break;
}
case MyHandler::kWhatSeekDone:
{
mState = CONNECTED;
// Unblock seekTo here in case we attempted to seek in a live stream
finishSeek(OK);
break;
}
case MyHandler::kWhatSeekPaused:
{
sp<AnotherPacketSource> source = getSource(true /* audio */);
if (source != NULL) {
source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
/* extra */ NULL,
/* discard */ true);
}
source = getSource(false /* video */);
if (source != NULL) {
source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
/* extra */ NULL,
/* discard */ true);
};
status_t err = OK;
msg->findInt32("err", &err);
if (err == OK) {
int64_t timeUs;
CHECK(msg->findInt64("time", &timeUs));
mHandler->continueSeekAfterPause(timeUs);
} else {
finishSeek(err);
}
break;
}
//.........这里部分代码省略.........
示例11: onMessageReceived
void TimedTextPlayer::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatPause: {
mPaused = true;
break;
}
case kWhatResume: {
mPaused = false;
if (mPendingSeekTimeUs != kInvalidTimeUs) {
seekToAsync(mPendingSeekTimeUs);
mPendingSeekTimeUs = kInvalidTimeUs;
} else {
doRead();
}
break;
}
case kWhatStart: {
sp<MediaPlayerBase> listener = mListener.promote();
if (listener == NULL) {
ALOGE("Listener is NULL when kWhatStart is received.");
break;
}
mPaused = false;
mPendingSeekTimeUs = kInvalidTimeUs;
int32_t positionMs = 0;
listener->getCurrentPosition(&positionMs);
int64_t seekTimeUs = positionMs * 1000ll;
notifyListener();
mSendSubtitleGeneration++;
doSeekAndRead(seekTimeUs);
break;
}
case kWhatRetryRead: {
int32_t generation = -1;
CHECK(msg->findInt32("generation", &generation));
if (generation != mSendSubtitleGeneration) {
// Drop obsolete msg.
break;
}
int64_t seekTimeUs;
int seekMode;
if (msg->findInt64("seekTimeUs", &seekTimeUs) &&
msg->findInt32("seekMode", &seekMode)) {
MediaSource::ReadOptions options;
options.setSeekTo(
seekTimeUs,
static_cast<MediaSource::ReadOptions::SeekMode>(seekMode));
doRead(&options);
} else {
doRead();
}
break;
}
case kWhatSeek: {
int64_t seekTimeUs = kInvalidTimeUs;
// Clear a displayed timed text before seeking.
notifyListener();
msg->findInt64("seekTimeUs", &seekTimeUs);
if (seekTimeUs == kInvalidTimeUs) {
sp<MediaPlayerBase> listener = mListener.promote();
if (listener != NULL) {
int32_t positionMs = 0;
listener->getCurrentPosition(&positionMs);
seekTimeUs = positionMs * 1000ll;
}
}
if (mPaused) {
mPendingSeekTimeUs = seekTimeUs;
break;
}
mSendSubtitleGeneration++;
doSeekAndRead(seekTimeUs);
break;
}
case kWhatSendSubtitle: {
int32_t generation;
CHECK(msg->findInt32("generation", &generation));
if (generation != mSendSubtitleGeneration) {
// Drop obsolete msg.
break;
}
// If current time doesn't reach to the fire time,
// re-post the message with the adjusted delay time.
int64_t fireTimeUs = kInvalidTimeUs;
if (msg->findInt64("fireTimeUs", &fireTimeUs)) {
// TODO: check if fireTimeUs is not kInvalidTimeUs.
int64_t delayUs = delayUsFromCurrentTime(fireTimeUs);
if (delayUs > 0) {
msg->post(delayUs);
break;
}
}
sp<RefBase> obj;
if (msg->findObject("subtitle", &obj)) {
sp<ParcelEvent> parcelEvent;
parcelEvent = static_cast<ParcelEvent*>(obj.get());
notifyListener(&(parcelEvent->parcel));
doRead();
} else {
//.........这里部分代码省略.........
示例12: onMessageReceived
void TimedTextPlayer::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatPause: {
mSendSubtitleGeneration++;
break;
}
case kWhatRetryRead: {
int64_t seekTimeUs;
int seekMode;
if (msg->findInt64("seekTimeUs", &seekTimeUs) &&
msg->findInt32("seekMode", &seekMode)) {
MediaSource::ReadOptions options;
options.setSeekTo(
seekTimeUs,
static_cast<MediaSource::ReadOptions::SeekMode>(seekMode));
doRead(&options);
} else {
doRead();
}
break;
}
case kWhatSeek: {
int64_t seekTimeUs = 0;
msg->findInt64("seekTimeUs", &seekTimeUs);
if (seekTimeUs < 0) {
sp<MediaPlayerBase> listener = mListener.promote();
if (listener != NULL) {
int32_t positionMs = 0;
listener->getCurrentPosition(&positionMs);
seekTimeUs = positionMs * 1000ll;
}
}
doSeekAndRead(seekTimeUs);
break;
}
case kWhatSendSubtitle: {
int32_t generation;
CHECK(msg->findInt32("generation", &generation));
if (generation != mSendSubtitleGeneration) {
// Drop obsolete msg.
break;
}
sp<RefBase> obj;
if (msg->findObject("subtitle", &obj)) {
sp<ParcelEvent> parcelEvent;
parcelEvent = static_cast<ParcelEvent*>(obj.get());
notifyListener(&(parcelEvent->parcel));
doRead();
} else {
notifyListener();
}
break;
}
case kWhatSetSource: {
sp<RefBase> obj;
msg->findObject("source", &obj);
if (obj == NULL) break;
if (mSource != NULL) {
mSource->stop();
}
mSource = static_cast<TimedTextSource*>(obj.get());
status_t err = mSource->start();
if (err != OK) {
notifyError(err);
break;
}
Parcel parcel;
err = mSource->extractGlobalDescriptions(&parcel);
if (err != OK) {
notifyError(err);
break;
}
notifyListener(&parcel);
break;
}
}
}
示例13: onMessageReceived
void Converter::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatMediaPullerNotify:
{
int32_t what;
CHECK(msg->findInt32("what", &what));
if (!mIsPCMAudio && mEncoder == NULL) {
ALOGV("got msg '%s' after encoder shutdown.",
msg->debugString().c_str());
if (what == MediaPuller::kWhatAccessUnit) {
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
void *mbuf;
if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
&& mbuf != NULL) {
ALOGV("releasing mbuf %p", mbuf);
accessUnit->meta()->setPointer("mediaBuffer", NULL);
static_cast<MediaBuffer *>(mbuf)->release();
mbuf = NULL;
}
}
break;
}
if (what == MediaPuller::kWhatEOS) {
mInputBufferQueue.push_back(NULL);
feedEncoderInputBuffers();
scheduleDoMoreWork();
} else {
CHECK_EQ(what, MediaPuller::kWhatAccessUnit);
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
#if 0
void *mbuf;
if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
&& mbuf != NULL) {
ALOGI("queueing mbuf %p", mbuf);
}
#endif
#if ENABLE_SILENCE_DETECTION
if (!mIsVideo) {
if (IsSilence(accessUnit)) {
if (mInSilentMode) {
break;
}
int64_t nowUs = ALooper::GetNowUs();
if (mFirstSilentFrameUs < 0ll) {
mFirstSilentFrameUs = nowUs;
} else if (nowUs >= mFirstSilentFrameUs + 10000000ll) {
mInSilentMode = true;
ALOGI("audio in silent mode now.");
break;
}
} else {
if (mInSilentMode) {
ALOGI("audio no longer in silent mode.");
}
mInSilentMode = false;
mFirstSilentFrameUs = -1ll;
}
}
#endif
mInputBufferQueue.push_back(accessUnit);
feedEncoderInputBuffers();
scheduleDoMoreWork();
}
break;
}
case kWhatEncoderActivity:
{
#if 0
int64_t whenUs;
if (msg->findInt64("whenUs", &whenUs)) {
int64_t nowUs = ALooper::GetNowUs();
ALOGI("[%s] kWhatEncoderActivity after %lld us",
mIsVideo ? "video" : "audio", nowUs - whenUs);
}
#endif
mDoMoreWorkPending = false;
if (mEncoder == NULL) {
break;
}
//.........这里部分代码省略.........
示例14: onCompleteConnection
// Completes an asynchronous, non-blocking connect() on the RTSP
// control socket. This handler re-posts its own message until the
// socket becomes writable, the attempt errors out, is cancelled, or
// (in the vendor build) a request timeout elapses. The final status
// is delivered by setting "result" on |reply| and posting it.
void ARTSPConnection::onCompleteConnection(const sp<AMessage> &msg) {
sp<AMessage> reply;
CHECK(msg->findMessage("reply", &reply));
int32_t connectionID;
CHECK(msg->findInt32("connection-id", &connectionID));
// A mismatched connection id or a state other than CONNECTING means
// a newer connect/disconnect superseded this attempt.
if ((connectionID != mConnectionID) || mState != CONNECTING) {
// While we were attempting to connect, the attempt was
// cancelled.
reply->setInt32("result", -ECONNABORTED);
reply->post();
return;
}
// Poll the socket for writability with a short timeout; a writable
// socket means the non-blocking connect() has finished (either
// successfully or with an error).
struct timeval tv;
tv.tv_sec = 0;
tv.tv_usec = kSelectTimeoutUs;
fd_set ws;
FD_ZERO(&ws);
FD_SET(mSocket, &ws);
int res = select(mSocket + 1, NULL, &ws, NULL, &tv);
CHECK_GE(res, 0);
if (res == 0) {
// Timed out. Not yet connected.
#ifndef ANDROID_DEFAULT_CODE
// Vendor addition: abandon the attempt entirely once it has been
// pending longer than kRequestTimeout (measured against the
// "timestamp" the requester stored in the message).
int64_t then, now = ALooper::GetNowUs();
if (msg->findInt64("timestamp", &then) && now - then > kRequestTimeout) {
ALOGE("connection timeout %lld > %lld", now, then);
reply->setInt32("result", -110 /*ETIMEDOUT*/);
reply->post();
mState = DISCONNECTED;
// NOTE(review): unlike the SO_ERROR failure path below, this
// close() does not call UnRegisterSocketUserTag/Mark even when
// mUIDValid — confirm whether that is intentional.
close(mSocket);
mSocket = -1;
return;
}
if(mExited)
return;
#endif // #ifndef ANDROID_DEFAULT_CODE
// Still within budget: re-post this message and check again later.
msg->post();
return;
}
// connect() has finished; retrieve its outcome via SO_ERROR.
int err;
socklen_t optionLen = sizeof(err);
CHECK_EQ(getsockopt(mSocket, SOL_SOCKET, SO_ERROR, &err, &optionLen), 0);
CHECK_EQ(optionLen, (socklen_t)sizeof(err));
if (err != 0) {
// Connect failed: report the (negated) errno, tear down the socket.
ALOGE("err = %d (%s)", err, strerror(err));
reply->setInt32("result", -err);
mState = DISCONNECTED;
if (mUIDValid) {
HTTPBase::UnRegisterSocketUserTag(mSocket);
HTTPBase::UnRegisterSocketUserMark(mSocket);
}
close(mSocket);
mSocket = -1;
} else {
// Connected: reset the RTSP sequence counter and start listening
// for server responses.
reply->setInt32("result", OK);
mState = CONNECTED;
mNextCSeq = 1;
postReceiveReponseEvent();
}
reply->post();
}
示例15: onMessageReceived
//.........这里部分代码省略.........
{
int32_t what;
CHECK(msg->findInt32("what", &what));
if (what == Renderer::kWhatEOS) {
int32_t audio;
CHECK(msg->findInt32("audio", &audio));
int32_t finalResult;
CHECK(msg->findInt32("finalResult", &finalResult));
if (audio) {
mAudioEOS = true;
} else {
mVideoEOS = true;
}
if (finalResult == ERROR_END_OF_STREAM) {
LOGV("reached %s EOS", audio ? "audio" : "video");
} else {
LOGE("%s track encountered an error (%d)",
audio ? "audio" : "video", finalResult);
notifyListener(
MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, finalResult);
}
if ((mAudioEOS || mAudioDecoder == NULL)
&& (mVideoEOS || mVideoDecoder == NULL)) {
notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0);
}
} else if (what == Renderer::kWhatPosition) {
int64_t positionUs;
CHECK(msg->findInt64("positionUs", &positionUs));
CHECK(msg->findInt64("videoLateByUs", &mVideoLateByUs));
if (mDriver != NULL) {
sp<NuPlayerDriver> driver = mDriver.promote();
if (driver != NULL) {
driver->notifyPosition(positionUs);
driver->notifyFrameStats(
mNumFramesTotal, mNumFramesDropped);
}
}
} else if (what == Renderer::kWhatFlushComplete) {
CHECK_EQ(what, (int32_t)Renderer::kWhatFlushComplete);
int32_t audio;
CHECK(msg->findInt32("audio", &audio));
LOGV("renderer %s flush completed.", audio ? "audio" : "video");
}
break;
}
case kWhatMoreDataQueued:
{
break;
}
case kWhatReset:
{
LOGV("kWhatReset");