This article collects typical usage examples of the C++ method SurfaceDescriptor::get_SurfaceDescriptorGralloc. If you are unsure what SurfaceDescriptor::get_SurfaceDescriptorGralloc does or how to call it, the hand-picked examples below should help; they also illustrate how the enclosing class SurfaceDescriptor is used.
Thirteen code examples of SurfaceDescriptor::get_SurfaceDescriptorGralloc are shown below, sorted by popularity by default.
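Most of the examples share the same three-step pattern: check that the descriptor's IPDL union tag is TSurfaceDescriptorGralloc, extract the SurfaceDescriptorGralloc with get_SurfaceDescriptorGralloc(), and resolve it to an android::GraphicBuffer via GrallocBufferActor::GetFrom(). The condensed sketch below shows only that pattern; the helper name and scaffolding are our own illustration, not code from any indexed project.
// Illustrative helper (hypothetical): reduce a SurfaceDescriptor to the
// GraphicBuffer it wraps, or return null if it is not gralloc-backed.
static android::sp<android::GraphicBuffer>
GraphicBufferFromDescriptor(const SurfaceDescriptor& aDescriptor)
{
  // SurfaceDescriptor is an IPDL union; calling get_SurfaceDescriptorGralloc()
  // on any other variant is invalid, so check the tag first.
  if (aDescriptor.type() != SurfaceDescriptor::TSurfaceDescriptorGralloc) {
    return nullptr;
  }
  const SurfaceDescriptorGralloc& gralloc =
    aDescriptor.get_SurfaceDescriptorGralloc();
  // GrallocBufferActor::GetFrom() maps the shared descriptor back to the
  // process-local android::GraphicBuffer.
  return GrallocBufferActor::GetFrom(gralloc);
}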
Example 1: gfxImageSurface
/*static*/ already_AddRefed<gfxASurface>
ShadowLayerForwarder::PlatformOpenDescriptor(OpenMode aMode,
                                             const SurfaceDescriptor& aSurface)
{
  PROFILER_LABEL("ShadowLayerForwarder", "PlatformOpenDescriptor");

  if (SurfaceDescriptor::TSurfaceDescriptorGralloc != aSurface.type()) {
    return nullptr;
  }

  sp<GraphicBuffer> buffer =
    GrallocBufferActor::GetFrom(aSurface.get_SurfaceDescriptorGralloc());

  uint32_t usage = GRALLOC_USAGE_SW_READ_OFTEN;
  if (OPEN_READ_WRITE == aMode) {
    usage |= GRALLOC_USAGE_SW_WRITE_OFTEN;
  }
  void *vaddr;
  DebugOnly<status_t> status = buffer->lock(usage, &vaddr);
  // If we fail to lock, we'll just end up aborting anyway.
  MOZ_ASSERT(status == OK);

  gfxIntSize size = aSurface.get_SurfaceDescriptorGralloc().size();
  gfxImageFormat format = ImageFormatForPixelFormat(buffer->getPixelFormat());
  long pixelStride = buffer->getStride();
  long byteStride = pixelStride * gfxASurface::BytePerPixelFromFormat(format);
  nsRefPtr<gfxASurface> surf =
    new gfxImageSurface((unsigned char*)vaddr, size, byteStride, format);
  return surf->CairoStatus() ? nullptr : surf.forget();
}
Example 2:
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceSize(
  const SurfaceDescriptor& aDescriptor, OpenMode aMode,
  gfxIntSize* aSize,
  gfxASurface** aSurface)
{
  if (SurfaceDescriptor::TSurfaceDescriptorGralloc != aDescriptor.type()) {
    return false;
  }

  sp<GraphicBuffer> buffer =
    GrallocBufferActor::GetFrom(aDescriptor.get_SurfaceDescriptorGralloc());
  *aSize = aDescriptor.get_SurfaceDescriptorGralloc().size();
  return true;
}
Example 3: SurfaceFormatForAndroidPixelFormat
void
GrallocDeprecatedTextureHostOGL::SwapTexturesImpl(const SurfaceDescriptor& aImage,
                                                  nsIntRegion*)
{
  MOZ_ASSERT(aImage.type() == SurfaceDescriptor::TSurfaceDescriptorGralloc);

  if (mBuffer) {
    // only done for hacky fix in gecko 23 for bug 862324.
    RegisterDeprecatedTextureHostAtGrallocBufferActor(nullptr, *mBuffer);
  }

  const SurfaceDescriptorGralloc& desc = aImage.get_SurfaceDescriptorGralloc();
  mGraphicBuffer = GrallocBufferActor::GetFrom(desc);
  mIsRBSwapped = desc.isRBSwapped();
  mFormat = SurfaceFormatForAndroidPixelFormat(mGraphicBuffer->getPixelFormat(),
                                               mIsRBSwapped);
  mTextureTarget = TextureTargetForAndroidPixelFormat(mGraphicBuffer->getPixelFormat());

  DeleteTextures();

  // only done for hacky fix in gecko 23 for bug 862324.
  // Doing this in SetBuffer is not enough, as DeprecatedImageHostBuffered::SwapTextures can
  // change the value of *mBuffer without calling SetBuffer again.
  RegisterDeprecatedTextureHostAtGrallocBufferActor(this, aImage);
}
Example 4: switch
// Convert pixels in a graphic buffer to NV12 format. aSource is the layer image
// containing the source graphic buffer, and aDestination is the destination of
// the conversion. Currently only two source formats are supported:
// - NV21/HAL_PIXEL_FORMAT_YCrCb_420_SP (from the camera preview window).
// - YV12/HAL_PIXEL_FORMAT_YV12 (from the video decoder).
static
void
ConvertGrallocImageToNV12(GrallocImage* aSource, uint8_t* aDestination)
{
  // Get graphic buffer.
  SurfaceDescriptor handle = aSource->GetSurfaceDescriptor();
  SurfaceDescriptorGralloc gralloc = handle.get_SurfaceDescriptorGralloc();
  sp<GraphicBuffer> graphicBuffer = GrallocBufferActor::GetFrom(gralloc);

  int pixelFormat = graphicBuffer->getPixelFormat();
  // Only support NV21 (from camera) or YV12 (from HW decoder output) for now.
  NS_ENSURE_TRUE_VOID(pixelFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
                      pixelFormat == HAL_PIXEL_FORMAT_YV12);

  void* imgPtr = nullptr;
  graphicBuffer->lock(GraphicBuffer::USAGE_SW_READ_MASK, &imgPtr);

  // Build PlanarYCbCrData for NV21 or YV12 buffer.
  PlanarYCbCrData yuv;
  switch (pixelFormat) {
    case HAL_PIXEL_FORMAT_YCrCb_420_SP: // From camera.
      yuv.mYChannel = static_cast<uint8_t*>(imgPtr);
      yuv.mYSkip = 0;
      yuv.mYSize.width = graphicBuffer->getWidth();
      yuv.mYSize.height = graphicBuffer->getHeight();
      yuv.mYStride = graphicBuffer->getStride();
      // 4:2:0.
      yuv.mCbCrSize.width = yuv.mYSize.width / 2;
      yuv.mCbCrSize.height = yuv.mYSize.height / 2;
      // Interleaved VU plane.
      yuv.mCrChannel = yuv.mYChannel + (yuv.mYStride * yuv.mYSize.height);
      yuv.mCrSkip = 1;
      yuv.mCbChannel = yuv.mCrChannel + 1;
      yuv.mCbSkip = 1;
      yuv.mCbCrStride = yuv.mYStride;
      ConvertPlanarYCbCrToNV12(&yuv, aDestination);
      break;
    case HAL_PIXEL_FORMAT_YV12: // From video decoder.
      // Android YV12 format is defined in system/core/include/system/graphics.h.
      yuv.mYChannel = static_cast<uint8_t*>(imgPtr);
      yuv.mYSkip = 0;
      yuv.mYSize.width = graphicBuffer->getWidth();
      yuv.mYSize.height = graphicBuffer->getHeight();
      yuv.mYStride = graphicBuffer->getStride();
      // 4:2:0.
      yuv.mCbCrSize.width = yuv.mYSize.width / 2;
      yuv.mCbCrSize.height = yuv.mYSize.height / 2;
      yuv.mCrChannel = yuv.mYChannel + (yuv.mYStride * yuv.mYSize.height);
      // Aligned to 16 bytes boundary.
      yuv.mCbCrStride = (yuv.mYStride / 2 + 15) & ~0x0F;
      yuv.mCrSkip = 0;
      yuv.mCbChannel = yuv.mCrChannel + (yuv.mCbCrStride * yuv.mCbCrSize.height);
      yuv.mCbSkip = 0;
      ConvertPlanarYCbCrToNV12(&yuv, aDestination);
      break;
    default:
      NS_ERROR("Unsupported input gralloc image type. Should never be here.");
  }

  graphicBuffer->unlock();
}
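As a side note, the NV12 layout that ConvertGrallocImageToNV12 produces is a full-resolution Y plane followed by a single interleaved CbCr plane holding one Cb/Cr pair per 2x2 block of luma samples. A quick size calculation for a tightly packed destination buffer, written here purely for illustration rather than taken from the indexed code, looks like this:
// Bytes needed for a tightly packed NV12 image of aWidth x aHeight pixels.
static size_t Nv12BufferSize(size_t aWidth, size_t aHeight)
{
  const size_t yBytes = aWidth * aHeight;                                 // Y plane
  const size_t cbcrBytes = 2 * ((aWidth + 1) / 2) * ((aHeight + 1) / 2);  // interleaved CbCr plane
  return yBytes + cbcrBytes;
}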
Example 5:
// only used for hacky fix in gecko 23 for bug 862324
static void
RegisterDeprecatedTextureHostAtGrallocBufferActor(DeprecatedTextureHost* aDeprecatedTextureHost,
                                                  const SurfaceDescriptor& aSurfaceDescriptor)
{
  if (IsSurfaceDescriptorValid(aSurfaceDescriptor)) {
    GrallocBufferActor* actor = static_cast<GrallocBufferActor*>(
      aSurfaceDescriptor.get_SurfaceDescriptorGralloc().bufferParent());
    actor->SetDeprecatedTextureHost(aDeprecatedTextureHost);
  }
}
Example 6: GetGraphicBufferFrom
android::sp<android::GraphicBuffer>
GetGraphicBufferFromDesc(SurfaceDescriptor aDesc)
{
  MaybeMagicGrallocBufferHandle handle;
  if (aDesc.type() == SurfaceDescriptor::TSurfaceDescriptorGralloc) {
    handle = aDesc.get_SurfaceDescriptorGralloc().buffer();
  }
  return GetGraphicBufferFrom(handle);
}
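The helper above simply returns a null sp<> when the descriptor is not gralloc-backed, so callers can branch on the result. A hypothetical call site (our own sketch, not taken from the indexed code) might look like this:
// Hypothetical call site: resolve a descriptor to its GraphicBuffer and log its
// dimensions. Only GetGraphicBufferFromDesc and the GraphicBuffer getters appear
// in the examples on this page; the function and printf are illustrative.
static void DumpGrallocBufferInfo(const SurfaceDescriptor& aDesc)
{
  android::sp<android::GraphicBuffer> buffer = GetGraphicBufferFromDesc(aDesc);
  if (!buffer.get()) {
    // Either the descriptor was not gralloc-backed or the handle did not resolve.
    return;
  }
  printf("gralloc buffer %ux%u, pixel format %d\n",
         unsigned(buffer->getWidth()), unsigned(buffer->getHeight()),
         int(buffer->getPixelFormat()));
}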
Example 7:
bool
ImageBridgeChild::DeallocSurfaceDescriptorGrallocNow(const SurfaceDescriptor& aBuffer)
{
#ifdef MOZ_HAVE_SURFACEDESCRIPTORGRALLOC
  PGrallocBufferChild* gbp =
    aBuffer.get_SurfaceDescriptorGralloc().bufferChild();
  PGrallocBufferChild::Send__delete__(gbp);
  return true;
#else
  NS_RUNTIMEABORT("Um, how did we get here?");
  return false;
#endif
}
Example 8:
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceContentType(
  const SurfaceDescriptor& aDescriptor, OpenMode aMode,
  gfxContentType* aContent,
  gfxASurface** aSurface)
{
  if (SurfaceDescriptor::TSurfaceDescriptorGralloc != aDescriptor.type()) {
    return false;
  }

  sp<GraphicBuffer> buffer =
    GrallocBufferActor::GetFrom(aDescriptor.get_SurfaceDescriptorGralloc());
  *aContent = ContentTypeFromPixelFormat(buffer->getPixelFormat());
  return true;
}
Example 9: dequeueBuffer
//......... part of this example's code is omitted here .........
    *outBuf = found;

    const bool useDefaultSize = !w && !h;
    if (useDefaultSize) {
      // use the default size
      w = mDefaultWidth;
      h = mDefaultHeight;
    }

    updateFormat = (format != 0);
    if (!updateFormat) {
      // keep the current (or default) format
      format = mPixelFormat;
    }

    mSlots[buf].mBufferState = BufferSlot::DEQUEUED;

    const sp<GraphicBuffer>& gbuf(mSlots[buf].mGraphicBuffer);
    alloc = (gbuf == NULL);
    if ((gbuf != NULL) &&
        ((uint32_t(gbuf->width) != w) ||
         (uint32_t(gbuf->height) != h) ||
         (uint32_t(gbuf->format) != format) ||
         ((uint32_t(gbuf->usage) & usage) != usage))) {
      alloc = true;
      descOld = mSlots[buf].mSurfaceDescriptor;
    }
  }

  // At this point, the buffer is now marked DEQUEUED, and no one else
  // should touch it, except for freeAllBuffersLocked(); we handle that
  // after trying to create the surface descriptor below.
  //
  // So we don't need mMutex locked, which would otherwise run the risk
  // of a deadlock on calling AllocSurfaceDescriptorGralloc().

  SurfaceDescriptor desc;
  ImageBridgeChild* ibc;
  sp<GraphicBuffer> graphicBuffer;
  if (alloc) {
    usage |= GraphicBuffer::USAGE_HW_TEXTURE;
    status_t error;
    ibc = ImageBridgeChild::GetSingleton();
    CNW_LOGD("dequeueBuffer: about to alloc surface descriptor");
    ibc->AllocSurfaceDescriptorGralloc(IntSize(w, h),
                                       format,
                                       usage,
                                       &desc);
    // We can only use a gralloc buffer here.  If we didn't get
    // one back, something went wrong.
    CNW_LOGD("dequeueBuffer: got surface descriptor");
    if (SurfaceDescriptor::TSurfaceDescriptorGralloc != desc.type()) {
      MOZ_ASSERT(SurfaceDescriptor::T__None == desc.type());
      CNW_LOGE("dequeueBuffer: failed to alloc gralloc buffer");
      return -ENOMEM;
    }
    graphicBuffer = GrallocBufferActor::GetFrom(desc.get_SurfaceDescriptorGralloc());
    error = graphicBuffer->initCheck();
    if (error != NO_ERROR) {
      CNW_LOGE("dequeueBuffer: createGraphicBuffer failed with error %d", error);
      return error;
    }
  }

  bool tooOld = false;
  {
    Mutex::Autolock lock(mMutex);
    if (generation == mGeneration) {
      if (updateFormat) {
        mPixelFormat = format;
      }
      if (alloc) {
        mSlots[buf].mGraphicBuffer = graphicBuffer;
        mSlots[buf].mSurfaceDescriptor = desc;
        mSlots[buf].mSurfaceDescriptor.get_SurfaceDescriptorGralloc().external() = true;
        mSlots[buf].mRequestBufferCalled = false;
        returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;
      }
      CNW_LOGD("dequeueBuffer: returning slot=%d buf=%p ", buf,
               mSlots[buf].mGraphicBuffer->handle);
    } else {
      tooOld = true;
    }
  }

  if (alloc && IsSurfaceDescriptorValid(descOld)) {
    ibc->DeallocSurfaceDescriptorGralloc(descOld);
  }

  if (alloc && tooOld) {
    ibc->DeallocSurfaceDescriptorGralloc(desc);
  }

  CNW_LOGD("dequeueBuffer: returning slot=%d buf=%p ", buf,
           mSlots[buf].mGraphicBuffer->handle);
  CNW_LOGD("dequeueBuffer: X");
  return returnFlags;
}
Example 10: switch
already_AddRefed<TextureHost>
CreateTextureHostOGL(const SurfaceDescriptor& aDesc,
                     ISurfaceAllocator* aDeallocator,
                     TextureFlags aFlags)
{
  RefPtr<TextureHost> result;
  switch (aDesc.type()) {
    case SurfaceDescriptor::TSurfaceDescriptorBuffer: {
      result = CreateBackendIndependentTextureHost(aDesc,
                                                   aDeallocator, aFlags);
      break;
    }

#ifdef MOZ_WIDGET_ANDROID
    case SurfaceDescriptor::TSurfaceTextureDescriptor: {
      const SurfaceTextureDescriptor& desc = aDesc.get_SurfaceTextureDescriptor();
      result = new SurfaceTextureHost(aFlags,
                                      (AndroidSurfaceTexture*)desc.surfTex(),
                                      desc.size());
      break;
    }
#endif

    case SurfaceDescriptor::TEGLImageDescriptor: {
      const EGLImageDescriptor& desc = aDesc.get_EGLImageDescriptor();
      result = new EGLImageTextureHost(aFlags,
                                       (EGLImage)desc.image(),
                                       (EGLSync)desc.fence(),
                                       desc.size(),
                                       desc.hasAlpha());
      break;
    }

#ifdef XP_MACOSX
    case SurfaceDescriptor::TSurfaceDescriptorMacIOSurface: {
      const SurfaceDescriptorMacIOSurface& desc =
        aDesc.get_SurfaceDescriptorMacIOSurface();
      result = new MacIOSurfaceTextureHostOGL(aFlags, desc);
      break;
    }
#endif

#ifdef MOZ_WIDGET_GONK
    case SurfaceDescriptor::TSurfaceDescriptorGralloc: {
      const SurfaceDescriptorGralloc& desc =
        aDesc.get_SurfaceDescriptorGralloc();
      result = new GrallocTextureHostOGL(aFlags, desc);
      break;
    }
#endif

#ifdef GL_PROVIDER_GLX
    case SurfaceDescriptor::TSurfaceDescriptorX11: {
      const auto& desc = aDesc.get_SurfaceDescriptorX11();
      result = new X11TextureHost(aFlags, desc);
      break;
    }
#endif

    case SurfaceDescriptor::TSurfaceDescriptorSharedGLTexture: {
      const auto& desc = aDesc.get_SurfaceDescriptorSharedGLTexture();
      result = new GLTextureHost(aFlags, desc.texture(),
                                 desc.target(),
                                 (GLsync)desc.fence(),
                                 desc.size(),
                                 desc.hasAlpha());
      break;
    }

    default:
      return nullptr;
  }
  return result.forget();
}
Example 11: autoTex
SharedSurface_Gralloc*
SharedSurface_Gralloc::Create(GLContext* prodGL,
                              const GLFormats& formats,
                              const gfxIntSize& size,
                              bool hasAlpha,
                              ISurfaceAllocator* allocator)
{
  static bool runOnce = true;
  if (runOnce) {
    sForceReadPixelsToFence = false;
    mozilla::Preferences::AddBoolVarCache(&sForceReadPixelsToFence,
                                          "gfx.gralloc.fence-with-readpixels");
    runOnce = false;
  }

  GLLibraryEGL* egl = prodGL->GetLibraryEGL();
  MOZ_ASSERT(egl);

  DEBUG_PRINT("SharedSurface_Gralloc::Create -------\n");

  if (!HasExtensions(egl, prodGL))
    return nullptr;

  SurfaceDescriptor baseDesc;
  SurfaceDescriptorGralloc desc;

  gfxContentType type = hasAlpha ? GFX_CONTENT_COLOR_ALPHA
                                 : GFX_CONTENT_COLOR;
  if (!allocator->AllocSurfaceDescriptorWithCaps(size, type, USING_GL_RENDERING_ONLY, &baseDesc))
    return nullptr;

  if (baseDesc.type() != SurfaceDescriptor::TSurfaceDescriptorGralloc) {
    allocator->DestroySharedSurface(&baseDesc);
    return nullptr;
  }

  desc = baseDesc.get_SurfaceDescriptorGralloc();
  sp<GraphicBuffer> buffer = GrallocBufferActor::GetFrom(desc);

  EGLDisplay display = egl->Display();
  EGLClientBuffer clientBuffer = buffer->getNativeBuffer();
  EGLint attrs[] = {
    LOCAL_EGL_NONE, LOCAL_EGL_NONE
  };
  EGLImage image = egl->fCreateImage(display,
                                     EGL_NO_CONTEXT,
                                     LOCAL_EGL_NATIVE_BUFFER_ANDROID,
                                     clientBuffer, attrs);
  if (!image) {
    allocator->DestroySharedSurface(&baseDesc);
    return nullptr;
  }

  prodGL->MakeCurrent();
  GLuint prodTex = 0;
  prodGL->fGenTextures(1, &prodTex);
  ScopedBindTexture autoTex(prodGL, prodTex);
  prodGL->fTexParameteri(LOCAL_GL_TEXTURE_2D, LOCAL_GL_TEXTURE_MIN_FILTER, LOCAL_GL_LINEAR);
  prodGL->fTexParameteri(LOCAL_GL_TEXTURE_2D, LOCAL_GL_TEXTURE_MAG_FILTER, LOCAL_GL_LINEAR);
  prodGL->fTexParameteri(LOCAL_GL_TEXTURE_2D, LOCAL_GL_TEXTURE_WRAP_S, LOCAL_GL_CLAMP_TO_EDGE);
  prodGL->fTexParameteri(LOCAL_GL_TEXTURE_2D, LOCAL_GL_TEXTURE_WRAP_T, LOCAL_GL_CLAMP_TO_EDGE);
  prodGL->fEGLImageTargetTexture2D(LOCAL_GL_TEXTURE_2D, image);

  egl->fDestroyImage(display, image);

  SharedSurface_Gralloc* surf =
    new SharedSurface_Gralloc(prodGL, size, hasAlpha, egl, allocator, desc, prodTex);

  DEBUG_PRINT("SharedSurface_Gralloc::Create: success -- surface %p, GraphicBuffer %p.\n",
              surf, buffer.get());

  return surf;
}
Example 12: dequeueBuffer
int GonkNativeWindow::dequeueBuffer(android_native_buffer_t** buffer)
{
  Mutex::Autolock lock(mMutex);

  int found = -1;
  int dequeuedCount = 0;
  bool tryAgain = true;

  CNW_LOGD("dequeueBuffer: E");
  while (tryAgain) {
    // look for a free buffer to give to the client
    found = INVALID_BUFFER_SLOT;
    dequeuedCount = 0;
    for (int i = 0; i < mBufferCount; i++) {
      const int state = mSlots[i].mBufferState;
      if (state == BufferSlot::DEQUEUED) {
        dequeuedCount++;
      } else if (state == BufferSlot::FREE) {
        /* We return the oldest of the free buffers to avoid
         * stalling the producer if possible. This is because
         * the consumer may still have pending reads of the
         * buffers in flight.
         */
        // Check 'found' before indexing mSlots with it, so the first free slot
        // is taken without reading mSlots[INVALID_BUFFER_SLOT].
        if (found < 0 ||
            mSlots[i].mFrameNumber < mSlots[found].mFrameNumber) {
          found = i;
        }
      }
    }

    // we're in synchronous mode and didn't find a buffer, we need to
    // wait for some buffers to be consumed
    tryAgain = (found == INVALID_BUFFER_SLOT);
    if (tryAgain) {
      CNW_LOGD("dequeueBuffer: Try again");
      mDequeueCondition.wait(mMutex);
      CNW_LOGD("dequeueBuffer: Now");
    }
  }

  if (found == INVALID_BUFFER_SLOT) {
    // This should not happen.
    CNW_LOGE("dequeueBuffer: no available buffer slots");
    return -EBUSY;
  }

  const int buf = found;

  // buffer is now in DEQUEUED
  mSlots[buf].mBufferState = BufferSlot::DEQUEUED;

  const sp<GraphicBuffer>& gbuf(mSlots[buf].mGraphicBuffer);
  if (gbuf == NULL) {
    status_t error;
    SurfaceDescriptor buffer;
    ImageBridgeChild* ibc = ImageBridgeChild::GetSingleton();
    ibc->AllocSurfaceDescriptorGralloc(gfxIntSize(mDefaultWidth, mDefaultHeight),
                                       mPixelFormat,
                                       mUsage,
                                       &buffer);

    sp<GraphicBuffer> graphicBuffer =
      GrallocBufferActor::GetFrom(buffer.get_SurfaceDescriptorGralloc());
    if (!graphicBuffer.get()) {
      return -ENOMEM;
    }
    error = graphicBuffer->initCheck();
    if (error != NO_ERROR) {
      CNW_LOGE("dequeueBuffer: createGraphicBuffer failed with error %d", error);
      return error;
    }
    mSlots[buf].mGraphicBuffer = graphicBuffer;
    mSlots[buf].mSurfaceDescriptor = buffer;
    mSlots[buf].mSurfaceDescriptor.get_SurfaceDescriptorGralloc().external() = true;
  }

  *buffer = mSlots[buf].mGraphicBuffer.get();
  CNW_LOGD("dequeueBuffer: returning slot=%d buf=%p ", buf,
           mSlots[buf].mGraphicBuffer->handle);
  CNW_LOGD("dequeueBuffer: X");
  return NO_ERROR;
}
Example 13: Encode
nsresult
OMXVideoEncoder::Encode(const Image* aImage, int aWidth, int aHeight,
                        int64_t aTimestamp, int aInputFlags)
{
  MOZ_ASSERT(mStarted, "Configure() should be called before Encode().");

  NS_ENSURE_TRUE(aWidth == mWidth && aHeight == mHeight && aTimestamp >= 0,
                 NS_ERROR_INVALID_ARG);

  status_t result;

  // Dequeue an input buffer.
  uint32_t index;
  result = mCodec->dequeueInputBuffer(&index, INPUT_BUFFER_TIMEOUT_US);
  NS_ENSURE_TRUE(result == OK, NS_ERROR_FAILURE);

  const sp<ABuffer>& inBuf = mInputBufs.itemAt(index);
  uint8_t* dst = inBuf->data();
  size_t dstSize = inBuf->capacity();

  size_t yLen = aWidth * aHeight;
  size_t uvLen = yLen / 2;
  // Buffer should be large enough to hold input image data.
  MOZ_ASSERT(dstSize >= yLen + uvLen);

  inBuf->setRange(0, yLen + uvLen);

  if (!aImage) {
    // Generate muted/black image directly in buffer.
    dstSize = yLen + uvLen;
    // Fill Y plane.
    memset(dst, 0x10, yLen);
    // Fill UV plane.
    memset(dst + yLen, 0x80, uvLen);
  } else {
    Image* img = const_cast<Image*>(aImage);
    ImageFormat format = img->GetFormat();

    MOZ_ASSERT(aWidth == img->GetSize().width &&
               aHeight == img->GetSize().height);

    if (format == GRALLOC_PLANAR_YCBCR) {
      // Get graphic buffer pointer.
      void* imgPtr = nullptr;
      GrallocImage* nativeImage = static_cast<GrallocImage*>(img);
      SurfaceDescriptor handle = nativeImage->GetSurfaceDescriptor();
      SurfaceDescriptorGralloc gralloc = handle.get_SurfaceDescriptorGralloc();
      sp<GraphicBuffer> graphicBuffer = GrallocBufferActor::GetFrom(gralloc);
      graphicBuffer->lock(GraphicBuffer::USAGE_SW_READ_MASK, &imgPtr);
      uint8_t* src = static_cast<uint8_t*>(imgPtr);

      // Only support NV21 for now.
      MOZ_ASSERT(graphicBuffer->getPixelFormat() ==
                 HAL_PIXEL_FORMAT_YCrCb_420_SP);

      // Build PlanarYCbCrData for NV21 buffer.
      PlanarYCbCrData nv21;
      // Y plane.
      nv21.mYChannel = src;
      nv21.mYSize.width = aWidth;
      nv21.mYSize.height = aHeight;
      nv21.mYStride = aWidth;
      nv21.mYSkip = 0;
      // Interleaved VU plane.
      nv21.mCrChannel = src + yLen;
      nv21.mCrSkip = 1;
      nv21.mCbChannel = nv21.mCrChannel + 1;
      nv21.mCbSkip = 1;
      nv21.mCbCrStride = aWidth;
      // 4:2:0.
      nv21.mCbCrSize.width = aWidth / 2;
      nv21.mCbCrSize.height = aHeight / 2;

      ConvertPlanarYCbCrToNV12(&nv21, dst);

      graphicBuffer->unlock();
    } else if (format == PLANAR_YCBCR) {
      ConvertPlanarYCbCrToNV12(static_cast<PlanarYCbCrImage*>(img)->GetData(),
                               dst);
    } else {
      // TODO: support RGB to YUV color conversion.
      NS_ERROR("Unsupported input image type.");
    }
  }

  // Queue this input buffer.
  result = mCodec->queueInputBuffer(index, 0, dstSize, aTimestamp, aInputFlags);

  return result == OK ? NS_OK : NS_ERROR_FAILURE;
}