本文整理汇总了C++中VideoSegment::AppendFrame方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoSegment::AppendFrame方法的具体用法?C++ VideoSegment::AppendFrame怎么用?C++ VideoSegment::AppendFrame使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoSegment
的用法示例。
在下文中一共展示了VideoSegment::AppendFrame方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: mon
void
MediaEngineTabVideoSource::
NotifyPull(MediaStreamGraph*, SourceMediaStream* aSource, mozilla::TrackID aID, mozilla::StreamTime aDesiredTime, mozilla::TrackTicks& aLastEndTime)
{
  VideoSegment videoSegment;
  MonitorAutoLock mon(mMonitor);
  // Take an extra reference to the current frame; mImage keeps its own.
  nsRefPtr<layers::CairoImage> frame = mImage;
  const TrackTicks endTicks = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  const TrackTicks ticksNeeded = endTicks - aLastEndTime;
  // Nothing to do if we already provided data up to the desired time.
  if (ticksNeeded <= 0) {
    return;
  }
  // A null frame is legal and is appended with a zero size.
  if (frame) {
    gfx::IntSize frameSize = frame->GetSize();
    videoSegment.AppendFrame(frame.forget(), ticksNeeded, gfx::ThebesIntSize(frameSize));
  } else {
    videoSegment.AppendFrame(nullptr, ticksNeeded, gfxIntSize(0,0));
  }
  // AppendToTrack fails when either a) the track was not added yet, or
  // b) it has been removed or finished.
  if (aSource->AppendToTrack(aID, &videoSegment)) {
    aLastEndTime = endTicks;
  }
}
示例2: lock
void
MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream *aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime,
                                          TrackTicks &aLastEndTime)
{
  // AddTrack takes ownership of segment
  VideoSegment segment;
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }
  // Take an extra reference to the current frame; mImage keeps its own.
  nsRefPtr<layers::Image> frame = mImage;
  const TrackTicks endTicks = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  const TrackTicks ticksNeeded = endTicks - aLastEndTime;
  // Already appended up to the desired time — nothing to do.
  if (ticksNeeded <= 0) {
    return;
  }
  // A null frame is legal; forget() on a null pointer hands over null,
  // and the size collapses to zero in that case.
  const IntSize frameSize = frame ? IntSize(mOpts.mWidth, mOpts.mHeight)
                                  : IntSize(0, 0);
  segment.AppendFrame(frame.forget(), ticksNeeded, frameSize);
  // AppendToTrack fails when either a) the track was not added yet, or
  // b) it has been removed or finished.
  if (aSource->AppendToTrack(aID, &segment)) {
    aLastEndTime = endTicks;
  }
}
示例3:
// Test encoding a track that starts with null data
TEST(VP8VideoTrackEncoder, NullFrameFirst)
{
  // Set up a VP8 encoder for 640x480 input.
  TestVP8TrackEncoder encoder;
  InitParam param = {true, 640, 480};
  encoder.TestInit(param);

  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  RefPtr<Image> image = generator.GenerateI420Image();
  TimeStamp now = TimeStamp::Now();
  VideoSegment segment;

  // Each appended frame lasts 100ms at the track rate.
  const mozilla::StreamTime frameTicks(9000); // 100ms

  // Feed two null frames first, then one real frame.
  segment.AppendFrame(nullptr, frameTicks, generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE, false, now);
  segment.AppendFrame(nullptr, frameTicks, generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE, false,
                      now + TimeDuration::FromSeconds(0.1));
  segment.AppendFrame(image.forget(), frameTicks, generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE, false,
                      now + TimeDuration::FromSeconds(0.3));
  encoder.SetCurrentFrames(segment);

  // Signal the end of the track with an empty segment.
  segment.Clear();
  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0,
                                   TrackEventCommand::TRACK_EVENT_ENDED,
                                   segment);

  EncodedFrameContainer container;
  ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
  EXPECT_TRUE(encoder.IsEncodingComplete());

  // The encoded output must cover 0.3s in total.
  uint64_t totalDuration = 0;
  for (auto& frame : container.GetEncodedFrames()) {
    totalDuration += frame->GetDuration();
  }
  const uint64_t pointThree = (PR_USEC_PER_SEC / 10) * 3;
  EXPECT_EQ(pointThree, totalDuration);
}
示例4:
TEST(VP8VideoTrackEncoder, FrameEncode)
{
  // Set up a VP8 encoder for 640x480 input.
  TestVP8TrackEncoder encoder;
  InitParam param = {true, 640, 480, 90000};
  encoder.TestInit(param);

  // Generate the YUV source images.
  nsTArray<RefPtr<Image>> images;
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  generator.Generate(images);

  // Append every generated frame to the segment; each one lasts 1 second
  // at the 90kHz track rate.
  VideoSegment segment;
  for (const RefPtr<Image>& generated : images) {
    RefPtr<Image> frame = generated;
    segment.AppendFrame(frame.forget(), mozilla::StreamTime(90000),
                        generator.GetSize());
  }

  // Notify the encoder of the queued track change.
  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, segment);

  // Pull the encoded data back from the encoder.
  EncodedFrameContainer container;
  EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
}
示例5: lock
void
MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream *aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime,
                                          const PrincipalHandle& aPrincipalHandle)
{
  // AddTrack takes ownership of segment
  VideoSegment segment;
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }
  // Take an extra reference to the current frame; mImage keeps its own.
  RefPtr<layers::Image> frame = mImage;
  const StreamTime shortfall = aDesiredTime - aSource->GetEndOfAppendedData(aID);
  // Nothing to append if the track already reaches the desired time.
  if (shortfall <= 0) {
    return;
  }
  // A null frame is legal; it is appended with a zero size.
  const IntSize frameSize(frame ? mOpts.mWidth : 0,
                          frame ? mOpts.mHeight : 0);
  segment.AppendFrame(frame.forget(), shortfall, frameSize, aPrincipalHandle);
  // AppendToTrack fails when either a) the track was not added yet, or
  // b) it has been removed or finished; the result is deliberately ignored.
  aSource->AppendToTrack(aID, &segment);
}
示例6: VideoSegment
void
DOMHwMediaStream::Init(MediaStream* stream)
{
  // Set up the primary video track of this hardware-overlay media stream.
  // Only source streams are initialized; any other stream kind is ignored.
  SourceMediaStream* srcStream = stream->AsSourceStream();
  if (srcStream) {
    VideoSegment segment;
#ifdef MOZ_WIDGET_GONK
    const StreamTime delta = STREAM_TIME_MAX; // Because MediaStreamGraph will run out frames in non-autoplay mode,
    // we must give it bigger frame length to cover this situation.
    mImageData.mOverlayId = DEFAULT_IMAGE_ID;
    mImageData.mSize.width = DEFAULT_IMAGE_WIDTH;
    mImageData.mSize.height = DEFAULT_IMAGE_HEIGHT;
    mOverlayImage->SetData(mImageData);
    // View the overlay image as a generic layers::Image for the segment.
    RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
    mozilla::gfx::IntSize size = image->GetSize();
    // The overlay frame covers the maximal stream time set above.
    segment.AppendFrame(image.forget(), delta, size);
#endif
    // The track is created with an empty segment (AddTrack takes ownership
    // of the heap allocation); the locally built segment — empty on
    // non-Gonk builds — is appended separately.
    srcStream->AddTrack(TRACK_VIDEO_PRIMARY, 0, new VideoSegment());
    srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
    // No further tracks will be added to this stream.
    srcStream->FinishAddTracks();
    srcStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  }
}
示例7: mon
void
MediaEngineTabVideoSource::NotifyPull(MediaStreamGraph*,
                                      SourceMediaStream* aSource,
                                      TrackID aID, StreamTime aDesiredTime,
                                      const PrincipalHandle& aPrincipalHandle)
{
  VideoSegment segment;
  MonitorAutoLock mon(mMonitor);
  if (mState != kStarted) {
    return;
  }
  // Take an extra reference to the current frame; mImage keeps its own.
  RefPtr<layers::SourceSurfaceImage> frame = mImage;
  const StreamTime shortfall =
    aDesiredTime - aSource->GetEndOfAppendedData(aID);
  // Already filled up to the desired time — nothing to append.
  if (shortfall <= 0) {
    return;
  }
  // A null frame is legal and carries a zero size.
  const gfx::IntSize frameSize = frame ? frame->GetSize() : IntSize(0, 0);
  segment.AppendFrame(frame.forget().downcast<layers::Image>(), shortfall,
                      frameSize, aPrincipalHandle);
  // AppendToTrack fails when either a) the track was not added yet, or
  // b) it has been removed or finished.
  aSource->AppendToTrack(aID, &segment);
}
示例8: lock
void
MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream *aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime)
{
  // AddTrack takes ownership of segment
  VideoSegment segment;
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }
  // Take an extra reference to the current frame; mImage keeps its own.
  RefPtr<layers::Image> frame = mImage;
  const StreamTime shortfall =
    aDesiredTime - aSource->GetEndOfAppendedData(aID);
  // Already filled up to the desired time — nothing to append.
  if (shortfall <= 0) {
    return;
  }
  // A null frame is legal; it is appended with a zero size.
  const IntSize frameSize(frame ? mOpts.mWidth : 0,
                          frame ? mOpts.mHeight : 0);
  segment.AppendFrame(frame.forget(), shortfall, frameSize);
  // AppendToTrack fails when either a) the track was not added yet, or
  // b) it has been removed or finished.
  aSource->AppendToTrack(aID, &segment);
  // Keep every fake track in step by appending matching null data.
  if (mHasFakeTracks) {
    for (int fakeIdx = 0; fakeIdx < kFakeVideoTrackCount; ++fakeIdx) {
      VideoSegment nullSegment;
      nullSegment.AppendNullData(shortfall);
      aSource->AppendToTrack(kTrackCount + fakeIdx, &nullSegment);
    }
  }
}
示例9: lock
// Called if the graph thinks it's running out of buffered video; repeat
// the last frame for whatever minimum period it think it needs. Note that
// this means that no *real* frame can be inserted during this period.
void
MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream *aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks &aLastEndTime)
{
  VideoSegment segment;
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }
  // Take an extra reference to the current frame; mImage keeps its own.
  nsRefPtr<layers::Image> frame = mImage;
  const TrackTicks endTicks = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  const TrackTicks ticksNeeded = endTicks - aLastEndTime;
  LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
            (int64_t) endTicks, (int64_t) ticksNeeded, frame ? "" : "<null>"));
  // Bug 846188 We may want to limit incoming frames to the requested frame rate
  // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
  // cause issues.
  // We may want to signal if the actual frame rate is below mMinFPS -
  // cameras often don't return the requested frame rate especially in low
  // light; we should consider surfacing this so that we can switch to a
  // lower resolution (which may up the frame rate)

  // Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime
  // Doing so means a negative delta and thus messes up handling of the graph
  if (ticksNeeded <= 0) {
    return;
  }
  // nullptr images are allowed
  if (frame) {
    segment.AppendFrame(frame.forget(), ticksNeeded, gfxIntSize(mWidth, mHeight));
  } else {
    segment.AppendFrame(nullptr, ticksNeeded, gfxIntSize(0,0));
  }
  // This can fail if either a) we haven't added the track yet, or b)
  // we've removed or finished the track.
  if (aSource->AppendToTrack(aID, &segment)) {
    aLastEndTime = endTicks;
  }
}
示例10: gfxIntSize
NS_IMETHODIMP
MediaEngineDefaultVideoSource::Notify(nsITimer* aTimer)
{
  // Timer callback: append one frame's worth of the current image to the
  // track. The local reference keeps mImage alive while forget() hands
  // ownership of the extra reference to the segment.
  VideoSegment segment;
  nsRefPtr<layers::PlanarYCbCrImage> frame = mImage;
  segment.AppendFrame(frame.forget(), USECS_PER_S / FPS,
                      gfxIntSize(WIDTH, HEIGHT));
  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
示例11: enter
// ViEExternalRenderer Callback. Process every incoming frame here.
// Wraps the raw planar YCbCr buffer in a layers::Image and appends it to
// the MSG track with a 1-tick duration. Returns 0 in every case.
int
MediaEngineWebRTCVideoSource::DeliverFrame(
   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mInSnapshotMode) {
    // Set the condition variable to false and notify Snapshot().
    PR_Lock(mSnapshotLock);
    mInSnapshotMode = false;
    PR_NotifyCondVar(mSnapshotCondVar);
    PR_Unlock(mSnapshotLock);
    return 0;
  }
  // Check for proper state.
  if (mState != kStarted) {
    return 0;
  }
  // Create a video frame and append it to the track.
  layers::Image::Format format = layers::Image::PLANAR_YCBCR;
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(&format, 1);
  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
  PRUint8* frame = static_cast<PRUint8*> (buffer);
  // 8 bits per luma pixel; 4 bits per pixel for the two half-resolution
  // chroma planes combined (presumably I420 layout — TODO confirm against
  // the ViE renderer contract).
  const PRUint8 lumaBpp = 8;
  const PRUint8 chromaBpp = 4;
  layers::PlanarYCbCrImage::Data data;
  data.mYChannel = frame;
  data.mYSize = gfxIntSize(mWidth, mHeight);
  data.mYStride = mWidth * lumaBpp/ 8;
  data.mCbCrStride = mWidth * chromaBpp / 8;
  // Cb plane starts right after the Y plane; Cr follows the Cb plane.
  data.mCbChannel = frame + mHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + mHeight * data.mCbCrStride / 2;
  data.mCbCrSize = gfxIntSize(mWidth/ 2, mHeight/ 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = gfxIntSize(mWidth, mHeight);
  data.mStereoMode = layers::STEREO_MODE_MONO;
  // SetData copies the pixel data, so `buffer` need not outlive this call.
  videoImage->SetData(data);
  VideoSegment segment;
  // Append with a duration of a single tick.
  segment.AppendFrame(image.forget(), 1, gfxIntSize(mWidth, mHeight));
  mSource->AppendToTrack(mTrackID, &(segment));
  return 0;
}
示例12: iter
TEST(VideoSegment, TestAppendFrameNotForceBlack) {
  // A frame appended without the force-black flag must not report it.
  RefPtr<layers::Image> nullImage = nullptr;
  VideoSegment segment;
  segment.AppendFrame(nullImage.forget(), mozilla::StreamTime(90000),
                      mozilla::gfx::IntSize(640, 480), PRINCIPAL_HANDLE_NONE);
  for (VideoSegment::ChunkIterator iter(segment); !iter.IsEnded();
       iter.Next()) {
    VideoChunk current = *iter;
    EXPECT_FALSE(current.mFrame.GetForceBlack());
  }
}
示例13: AllocateSolidColorFrame
nsresult
MediaEngineDefaultVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  // Begin producing fake video on aStream: seed the track with a single
  // solid-color frame and arm a repeating timer for subsequent frames.
  // Returns NS_ERROR_FAILURE unless the source is in the Allocated state
  // and a timer instance could be created.
  if (mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }
  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
  if (!mTimer) {
    return NS_ERROR_FAILURE;
  }
  mSource = aStream;
  // Allocate a single blank Image
  ImageFormat format = PLANAR_YCBCR;
  mImageContainer = layers::LayerManager::CreateImageContainer();
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(&format, 1);
  mImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
  layers::PlanarYCbCrImage::Data data;
  // Allocate a single blank Image
  // Seed Cb/Cr at 16; the Notify() timer callback walks these values to
  // animate the frame color over time.
  mCb = 16;
  mCr = 16;
  AllocateSolidColorFrame(data, mOpts.mWidth, mOpts.mHeight, 0x80, mCb, mCr);
  // SetData copies data, so we can free the frame
  mImage->SetData(data);
  ReleaseFrame(data);
  // AddTrack takes ownership of segment
  VideoSegment *segment = new VideoSegment();
  // First frame lasts one frame period at the configured FPS.
  segment->AppendFrame(image.forget(), USECS_PER_S / mOpts.mFPS,
                       gfxIntSize(mOpts.mWidth, mOpts.mHeight));
  mSource->AddTrack(aID, VIDEO_RATE, 0, segment);
  // We aren't going to add any more tracks
  mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  // Remember TrackID so we can end it later
  mTrackID = aID;
  // Start timer for subsequent frames
  mTimer->InitWithCallback(this, 1000 / mOpts.mFPS, nsITimer::TYPE_REPEATING_SLACK);
  mState = kStarted;
  return NS_OK;
}
示例14: if
NS_IMETHODIMP
MediaEngineDefaultVideoSource::Notify(nsITimer* aTimer)
{
  // Timer callback: step (mCb, mCr) one unit along a cyclic walk through
  // chroma space, render a solid-color frame in the new color, and append
  // one frame period of it to the track.
  // Update the target color
  if (mCr <= 16) {
    if (mCb < 240) {
      mCb++;   // bottom edge: sweep Cb up toward 240
    } else {
      mCr++;   // corner reached: start raising Cr
    }
  } else if (mCb >= 240) {
    if (mCr < 240) {
      mCr++;   // right edge: sweep Cr up toward 240
    } else {
      mCb--;   // corner reached: start lowering Cb
    }
  } else if (mCr >= 240) {
    if (mCb > 16) {
      mCb--;   // top edge: sweep Cb back down toward 16
    } else {
      mCr--;   // corner reached: start lowering Cr
    }
  } else {
    mCr--;     // left edge: fall back toward Cr == 16
  }
  // Allocate a single solid color image
  ImageFormat format = PLANAR_YCBCR;
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(&format, 1);
  nsRefPtr<layers::PlanarYCbCrImage> ycbcr_image =
    static_cast<layers::PlanarYCbCrImage*>(image.get());
  layers::PlanarYCbCrImage::Data data;
  AllocateSolidColorFrame(data, mOpts.mWidth, mOpts.mHeight, 0x80, mCb, mCr);
  ycbcr_image->SetData(data);
  // SetData copies data, so we can free the frame
  ReleaseFrame(data);
  // AddTrack takes ownership of segment
  VideoSegment segment;
  // One frame period at the configured FPS.
  segment.AppendFrame(ycbcr_image.forget(), USECS_PER_S / mOpts.mFPS,
                      gfxIntSize(mOpts.mWidth, mOpts.mHeight));
  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
示例15: AppendToTrack
// guts for appending data to the MSG track
bool MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource,
                                                 layers::Image* aImage,
                                                 TrackID aID,
                                                 StreamTime delta)
{
  MOZ_ASSERT(aSource);
  VideoSegment segment;
  // Hold a reference so forget() can hand ownership to the segment.
  RefPtr<layers::Image> frame = aImage;
  // A null image is legal; it is appended with a zero size.
  const IntSize frameSize(frame ? mWidth : 0, frame ? mHeight : 0);
  segment.AppendFrame(frame.forget(), delta, frameSize);
  // This is safe from any thread, and is safe if the track is Finished
  // or Destroyed.
  // This can fail if either a) we haven't added the track yet, or b)
  // we've removed or finished the track.
  return aSource->AppendToTrack(aID, &segment);
}