

C++ PlanarYCbCrImage Class Code Examples

This article collects typical usage examples of the C++ PlanarYCbCrImage class. If you are wondering what PlanarYCbCrImage is for and how it is used in practice, the hand-picked examples below should help.


The following 15 code examples of the PlanarYCbCrImage class are shown below, sorted by popularity.

Example 1: MOZ_ASSERT

nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
{
  nsRefPtr<Image> img;
  if (aChunk.mFrame.GetForceBlack() || aChunk.IsNull()) {
    if (!mMuteFrame) {
      mMuteFrame = VideoFrame::CreateBlackImage(gfxIntSize(mFrameWidth, mFrameHeight));
      MOZ_ASSERT(mMuteFrame);
    }
    img = mMuteFrame;
  } else {
    img = aChunk.mFrame.GetImage();
  }

  ImageFormat format = img->GetFormat();
  if (format != ImageFormat::PLANAR_YCBCR) {
    VP8LOG("Unsupported video format\n");
    return NS_ERROR_FAILURE;
  }

  // Cast away constness b/c some of the accessors are non-const
  PlanarYCbCrImage* yuv =
    const_cast<PlanarYCbCrImage*>(static_cast<const PlanarYCbCrImage*>(img.get()));
  // Big-time assumption here that this is all contiguous data coming
  // from getUserMedia or other sources.
  MOZ_ASSERT(yuv);
  if (!yuv->IsValid()) {
    NS_WARNING("PlanarYCbCrImage is not valid");
    return NS_ERROR_FAILURE;
  }
  const PlanarYCbCrImage::Data *data = yuv->GetData();

  if (isYUV420(data) && !data->mCbSkip) { // 420 planar
    mVPXImageWrapper->planes[VPX_PLANE_Y] = data->mYChannel;
    mVPXImageWrapper->planes[VPX_PLANE_U] = data->mCbChannel;
    mVPXImageWrapper->planes[VPX_PLANE_V] = data->mCrChannel;
    mVPXImageWrapper->stride[VPX_PLANE_Y] = data->mYStride;
    mVPXImageWrapper->stride[VPX_PLANE_U] = data->mCbCrStride;
    mVPXImageWrapper->stride[VPX_PLANE_V] = data->mCbCrStride;
  } else {
    uint32_t yPlaneSize = mFrameWidth * mFrameHeight;
    uint32_t halfWidth = (mFrameWidth + 1) / 2;
    uint32_t halfHeight = (mFrameHeight + 1) / 2;
    uint32_t uvPlaneSize = halfWidth * halfHeight;
    if (mI420Frame.IsEmpty()) {
      mI420Frame.SetLength(yPlaneSize + uvPlaneSize * 2);
    }

    MOZ_ASSERT(mI420Frame.Length() >= (yPlaneSize + uvPlaneSize * 2));
    uint8_t *y = mI420Frame.Elements();
    uint8_t *cb = mI420Frame.Elements() + yPlaneSize;
    uint8_t *cr = mI420Frame.Elements() + yPlaneSize + uvPlaneSize;

    if (isYUV420(data) && data->mCbSkip) {
      // If mCbSkip is set, we assume it's nv12 or nv21.
      if (data->mCbChannel < data->mCrChannel) { // nv12
        libyuv::NV12ToI420(data->mYChannel, data->mYStride,
                           data->mCbChannel, data->mCbCrStride,
                           y, mFrameWidth,
                           cb, halfWidth,
                           cr, halfWidth,
                           mFrameWidth, mFrameHeight);
      } else { // nv21
        libyuv::NV21ToI420(data->mYChannel, data->mYStride,
                           data->mCrChannel, data->mCbCrStride,
                           y, mFrameWidth,
                           cb, halfWidth,
                           cr, halfWidth,
                           mFrameWidth, mFrameHeight);
      }
    } else if (isYUV444(data) && !data->mCbSkip) {
      libyuv::I444ToI420(data->mYChannel, data->mYStride,
                         data->mCbChannel, data->mCbCrStride,
                         data->mCrChannel, data->mCbCrStride,
                         y, mFrameWidth,
                         cb, halfWidth,
                         cr, halfWidth,
                         mFrameWidth, mFrameHeight);
    } else if (isYUV422(data) && !data->mCbSkip) {
      libyuv::I422ToI420(data->mYChannel, data->mYStride,
                         data->mCbChannel, data->mCbCrStride,
                         data->mCrChannel, data->mCbCrStride,
                         y, mFrameWidth,
                         cb, halfWidth,
                         cr, halfWidth,
                         mFrameWidth, mFrameHeight);
    } else {
      VP8LOG("Unsupported planar format\n");
      return NS_ERROR_NOT_IMPLEMENTED;
    }

    mVPXImageWrapper->planes[VPX_PLANE_Y] = y;
    mVPXImageWrapper->planes[VPX_PLANE_U] = cb;
    mVPXImageWrapper->planes[VPX_PLANE_V] = cr;
    mVPXImageWrapper->stride[VPX_PLANE_Y] = mFrameWidth;
    mVPXImageWrapper->stride[VPX_PLANE_U] = halfWidth;
    mVPXImageWrapper->stride[VPX_PLANE_V] = halfWidth;
  }

  return NS_OK;
}
Developer: Jinwoo-Song, Project: gecko-dev, Lines: 100, Source: VP8TrackEncoder.cpp
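
Note: isYUV420, isYUV422 and isYUV444 are helper predicates defined elsewhere in VP8TrackEncoder.cpp and not shown above. As a rough sketch of what they likely test — assuming they only compare the chroma plane dimensions in PlanarYCbCrImage::Data against the luma plane, which the real helpers may do differently:

// Hypothetical sketch of the subsampling predicates used in PrepareRawFrame.
// Assumes Data exposes mYSize and mCbCrSize as gfxIntSize; the real helpers
// may check additional fields.
static bool isYUV420(const PlanarYCbCrImage::Data* aData)
{
  // 4:2:0 - chroma planes are half the luma resolution in both dimensions.
  return aData->mCbCrSize.width == (aData->mYSize.width + 1) / 2 &&
         aData->mCbCrSize.height == (aData->mYSize.height + 1) / 2;
}

static bool isYUV422(const PlanarYCbCrImage::Data* aData)
{
  // 4:2:2 - chroma planes are half the luma width but full height.
  return aData->mCbCrSize.width == (aData->mYSize.width + 1) / 2 &&
         aData->mCbCrSize.height == aData->mYSize.height;
}

static bool isYUV444(const PlanarYCbCrImage::Data* aData)
{
  // 4:4:4 - chroma planes match the luma plane exactly.
  return aData->mCbCrSize == aData->mYSize;
}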

Example 2: autoLock

bool
DeprecatedImageClientSingle::UpdateImage(ImageContainer* aContainer,
                                         uint32_t aContentFlags)
{
  AutoLockImage autoLock(aContainer);

  Image *image = autoLock.GetImage();
  if (!image) {
    return false;
  }

  if (mLastPaintedImageSerial == image->GetSerial()) {
    return true;
  }

  if (image->GetFormat() == PLANAR_YCBCR &&
      EnsureDeprecatedTextureClient(TEXTURE_YCBCR)) {
    PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);

    if (ycbcr->AsDeprecatedSharedPlanarYCbCrImage()) {
      AutoLockDeprecatedTextureClient lock(mDeprecatedTextureClient);

      SurfaceDescriptor sd;
      if (!ycbcr->AsDeprecatedSharedPlanarYCbCrImage()->ToSurfaceDescriptor(sd)) {
        return false;
      }

      if (IsSurfaceDescriptorValid(*lock.GetSurfaceDescriptor())) {
        GetForwarder()->DestroySharedSurface(lock.GetSurfaceDescriptor());
      }

      *lock.GetSurfaceDescriptor() = sd;
    } else {
      AutoLockYCbCrClient clientLock(mDeprecatedTextureClient);

      if (!clientLock.Update(ycbcr)) {
        NS_WARNING("failed to update DeprecatedTextureClient (YCbCr)");
        return false;
      }
    }
  } else if (image->GetFormat() == SHARED_TEXTURE &&
             EnsureDeprecatedTextureClient(TEXTURE_SHARED_GL_EXTERNAL)) {
    SharedTextureImage* sharedImage = static_cast<SharedTextureImage*>(image);
    const SharedTextureImage::Data *data = sharedImage->GetData();

    SharedTextureDescriptor texture(data->mShareType,
                                    data->mHandle,
                                    data->mSize,
                                    data->mInverted);
    mDeprecatedTextureClient->SetDescriptor(SurfaceDescriptor(texture));
  } else if (image->GetFormat() == SHARED_RGB &&
             EnsureDeprecatedTextureClient(TEXTURE_SHMEM)) {
    nsIntRect rect(0, 0,
                   image->GetSize().width,
                   image->GetSize().height);
    UpdatePictureRect(rect);

    AutoLockDeprecatedTextureClient lock(mDeprecatedTextureClient);

    SurfaceDescriptor desc;
    if (!static_cast<DeprecatedSharedRGBImage*>(image)->ToSurfaceDescriptor(desc)) {
      return false;
    }
    mDeprecatedTextureClient->SetDescriptor(desc);
#ifdef MOZ_WIDGET_GONK
  } else if (image->GetFormat() == GONK_IO_SURFACE &&
             EnsureDeprecatedTextureClient(TEXTURE_SHARED_GL_EXTERNAL)) {
    nsIntRect rect(0, 0,
                   image->GetSize().width,
                   image->GetSize().height);
    UpdatePictureRect(rect);

    AutoLockDeprecatedTextureClient lock(mDeprecatedTextureClient);

    SurfaceDescriptor desc = static_cast<GonkIOSurfaceImage*>(image)->GetSurfaceDescriptor();
    if (!IsSurfaceDescriptorValid(desc)) {
      return false;
    }
    mDeprecatedTextureClient->SetDescriptor(desc);
  } else if (image->GetFormat() == GRALLOC_PLANAR_YCBCR) {
    EnsureDeprecatedTextureClient(TEXTURE_SHARED_GL_EXTERNAL);

    nsIntRect rect(0, 0,
                   image->GetSize().width,
                   image->GetSize().height);
    UpdatePictureRect(rect);

    AutoLockDeprecatedTextureClient lock(mDeprecatedTextureClient);

    SurfaceDescriptor desc = static_cast<GrallocPlanarYCbCrImage*>(image)->GetSurfaceDescriptor();
    if (!IsSurfaceDescriptorValid(desc)) {
      return false;
    }
    mDeprecatedTextureClient->SetDescriptor(desc);
#endif
  } else {
    nsRefPtr<gfxASurface> surface = image->GetAsSurface();
    MOZ_ASSERT(surface);

    EnsureDeprecatedTextureClient(TEXTURE_SHMEM);
//......... (remainder of code omitted) .........
Developer: (not listed), Project: (not listed), Lines: 101, Source: (not listed)

Example 3: GetContainer

void
ImageLayerD3D10::RenderLayer()
{
  ImageContainer *container = GetContainer();
  if (!container) {
    return;
  }

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  gfxIntSize size = mScaleMode == SCALE_NONE ? image->GetSize() : mScaleToSize;

  SetEffectTransformAndOpacity();

  ID3D10EffectTechnique *technique;

  if (image->GetFormat() == Image::CAIRO_SURFACE || image->GetFormat() == Image::REMOTE_IMAGE_BITMAP)
  {
    bool hasAlpha = false;

    if (image->GetFormat() == Image::REMOTE_IMAGE_BITMAP) {
      RemoteBitmapImage *remoteImage =
        static_cast<RemoteBitmapImage*>(image);
      
      if (!image->GetBackendData(LayerManager::LAYERS_D3D10)) {
        nsAutoPtr<TextureD3D10BackendData> dat = new TextureD3D10BackendData();
        dat->mTexture = DataToTexture(device(), remoteImage->mData, remoteImage->mStride, remoteImage->mSize);

        if (dat->mTexture) {
          device()->CreateShaderResourceView(dat->mTexture, NULL, getter_AddRefs(dat->mSRView));
          image->SetBackendData(LayerManager::LAYERS_D3D10, dat.forget());
        }
      }

      hasAlpha = remoteImage->mFormat == RemoteImageData::BGRA32;
    } else {
      CairoImage *cairoImage =
        static_cast<CairoImage*>(image);

      if (!cairoImage->mSurface) {
        return;
      }

      if (!image->GetBackendData(LayerManager::LAYERS_D3D10)) {
        nsAutoPtr<TextureD3D10BackendData> dat = new TextureD3D10BackendData();
        dat->mTexture = SurfaceToTexture(device(), cairoImage->mSurface, cairoImage->mSize);

        if (dat->mTexture) {
          device()->CreateShaderResourceView(dat->mTexture, NULL, getter_AddRefs(dat->mSRView));
          image->SetBackendData(LayerManager::LAYERS_D3D10, dat.forget());
        }
      }

      hasAlpha = cairoImage->mSurface->GetContentType() == gfxASurface::CONTENT_COLOR_ALPHA;
    }

    TextureD3D10BackendData *data =
      static_cast<TextureD3D10BackendData*>(image->GetBackendData(LayerManager::LAYERS_D3D10));

    if (!data) {
      return;
    }

    nsRefPtr<ID3D10Device> dev;
    data->mTexture->GetDevice(getter_AddRefs(dev));
    if (dev != device()) {
      return;
    }
    
    if (hasAlpha) {
      if (mFilter == gfxPattern::FILTER_NEAREST) {
        technique = effect()->GetTechniqueByName("RenderRGBALayerPremulPoint");
      } else {
        technique = effect()->GetTechniqueByName("RenderRGBALayerPremul");
      }
    } else {
      if (mFilter == gfxPattern::FILTER_NEAREST) {
        technique = effect()->GetTechniqueByName("RenderRGBLayerPremulPoint");
      } else {
        technique = effect()->GetTechniqueByName("RenderRGBLayerPremul");
      }
    }

    effect()->GetVariableByName("tRGB")->AsShaderResource()->SetResource(data->mSRView);

    effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)0,
        (float)0,
        (float)size.width,
        (float)size.height)
      );
  } else if (image->GetFormat() == Image::PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage =
      static_cast<PlanarYCbCrImage*>(image);
//......... (remainder of code omitted) .........
Developer: michaelrhanson, Project: mozilla-central, Lines: 101, Source: ImageLayerD3D10.cpp

Example 4: GetContainer

void
ImageLayerD3D10::RenderLayer()
{
  ImageContainer *container = GetContainer();
  if (!container) {
    return;
  }

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  gfxIntSize size = image->GetSize();

  SetEffectTransformAndOpacity();

  ID3D10EffectTechnique *technique;
  nsRefPtr<IDXGIKeyedMutex> keyedMutex;

  if (image->GetFormat() == ImageFormat::CAIRO_SURFACE ||
      image->GetFormat() == ImageFormat::REMOTE_IMAGE_BITMAP ||
      image->GetFormat() == ImageFormat::REMOTE_IMAGE_DXGI_TEXTURE ||
      image->GetFormat() == ImageFormat::D3D9_RGB32_TEXTURE) {
    NS_ASSERTION(image->GetFormat() != ImageFormat::CAIRO_SURFACE ||
                 !static_cast<CairoImage*>(image)->mSurface ||
                 static_cast<CairoImage*>(image)->mSurface->GetContentType() != gfxASurface::CONTENT_ALPHA,
                 "Image layer has alpha image");
    bool hasAlpha = false;

    nsRefPtr<ID3D10ShaderResourceView> srView = GetImageSRView(image, hasAlpha, getter_AddRefs(keyedMutex));
    if (!srView) {
      return;
    }

    uint8_t shaderFlags = SHADER_PREMUL;
    shaderFlags |= LoadMaskTexture();
    shaderFlags |= hasAlpha
                  ? SHADER_RGBA : SHADER_RGB;
    shaderFlags |= mFilter == gfxPattern::FILTER_NEAREST
                  ? SHADER_POINT : SHADER_LINEAR;
    technique = SelectShader(shaderFlags);


    effect()->GetVariableByName("tRGB")->AsShaderResource()->SetResource(srView);

    effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)0,
        (float)0,
        (float)size.width,
        (float)size.height)
      );
  } else if (image->GetFormat() == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage =
      static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->IsValid()) {
      return;
    }

    if (!yuvImage->GetBackendData(mozilla::layers::LAYERS_D3D10)) {
      AllocateTexturesYCbCr(yuvImage);
    }

    PlanarYCbCrD3D10BackendData *data =
      static_cast<PlanarYCbCrD3D10BackendData*>(yuvImage->GetBackendData(mozilla::layers::LAYERS_D3D10));

    if (!data) {
      return;
    }

    nsRefPtr<ID3D10Device> dev;
    data->mYTexture->GetDevice(getter_AddRefs(dev));
    if (dev != device()) {
      return;
    }

    // TODO: At some point we should try to deal with mFilter here, you don't
    // really want to use point filtering in the case of NEAREST, since that
    // would also use point filtering for Chroma upsampling. Where most likely
    // the user would only want point filtering for final RGB image upsampling.

    technique = SelectShader(SHADER_YCBCR | LoadMaskTexture());

    effect()->GetVariableByName("tY")->AsShaderResource()->SetResource(data->mYView);
    effect()->GetVariableByName("tCb")->AsShaderResource()->SetResource(data->mCbView);
    effect()->GetVariableByName("tCr")->AsShaderResource()->SetResource(data->mCrView);

    effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)0,
        (float)0,
        (float)size.width,
        (float)size.height)
      );

    effect()->GetVariableByName("vTextureCoords")->AsVector()->SetFloatVector(
//......... (remainder of code omitted) .........
Developer: Pulfer, Project: Pale-Moon, Lines: 101, Source: ImageLayerD3D10.cpp

Example 5: handle

bool
ImageClientSingle::UpdateImage(ImageContainer* aContainer, uint32_t aContentFlags)
{
  AutoTArray<ImageContainer::OwningImage,4> images;
  uint32_t generationCounter;
  aContainer->GetCurrentImages(&images, &generationCounter);

  if (mLastUpdateGenerationCounter == generationCounter) {
    return true;
  }
  mLastUpdateGenerationCounter = generationCounter;

  for (int32_t i = images.Length() - 1; i >= 0; --i) {
    if (!images[i].mImage->IsValid()) {
      // Don't try to update to an invalid image.
      images.RemoveElementAt(i);
    }
  }
  if (images.IsEmpty()) {
    // This can happen if a ClearAllImages raced with SetCurrentImages from
    // another thread and ClearImagesFromImageBridge ran after the
    // SetCurrentImages call but before UpdateImageClientNow.
    // This can also happen if all images in the list are invalid.
    // We return true because the caller would attempt to recreate the
    // ImageClient otherwise, and that isn't going to help.
    return true;
  }

  nsTArray<Buffer> newBuffers;
  AutoTArray<CompositableForwarder::TimedTextureClient,4> textures;

  for (auto& img : images) {
    Image* image = img.mImage;

#ifdef MOZ_WIDGET_GONK
    if (image->GetFormat() == ImageFormat::OVERLAY_IMAGE) {
      OverlayImage* overlayImage = static_cast<OverlayImage*>(image);
      OverlaySource source;
      if (overlayImage->GetSidebandStream().IsValid()) {
        // Duplicate GonkNativeHandle::NhObj for ipc,
        // since ParamTraits<GonkNativeHandle>::Write() absorbs native_handle_t.
        RefPtr<GonkNativeHandle::NhObj> nhObj = overlayImage->GetSidebandStream().GetDupNhObj();
        GonkNativeHandle handle(nhObj);
        if (!handle.IsValid()) {
          gfxWarning() << "ImageClientSingle::UpdateImage failed in GetDupNhObj";
          return false;
        }
        source.handle() = OverlayHandle(handle);
      } else {
        source.handle() = OverlayHandle(overlayImage->GetOverlayId());
      }
      source.size() = overlayImage->GetSize();
      GetForwarder()->UseOverlaySource(this, source, image->GetPictureRect());
      continue;
    }
#endif

    RefPtr<TextureClient> texture = image->GetTextureClient(this);
    const bool hasTextureClient = !!texture;

    for (int32_t i = mBuffers.Length() - 1; i >= 0; --i) {
      if (mBuffers[i].mImageSerial == image->GetSerial()) {
        if (hasTextureClient) {
          MOZ_ASSERT(image->GetTextureClient(this) == mBuffers[i].mTextureClient);
        } else {
          texture = mBuffers[i].mTextureClient;
        }
        // Remove this element from mBuffers so mBuffers only contains
        // images that aren't present in 'images'
        mBuffers.RemoveElementAt(i);
      }
    }

    if (!texture) {
      // Slow path, we should not be hitting it very often and if we do it means
      // we are using an Image class that is not backed by textureClient and we
      // should fix it.
      if (image->GetFormat() == ImageFormat::PLANAR_YCBCR) {
        PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);
        const PlanarYCbCrData* data = ycbcr->GetData();
        if (!data) {
          return false;
        }
        texture = TextureClient::CreateForYCbCr(GetForwarder(),
          data->mYSize, data->mCbCrSize, data->mStereoMode,
          TextureFlags::DEFAULT | mTextureFlags
        );
        if (!texture) {
          return false;
        }

        TextureClientAutoLock autoLock(texture, OpenMode::OPEN_WRITE_ONLY);
        if (!autoLock.Succeeded()) {
          return false;
        }

        bool status = UpdateYCbCrTextureClient(texture, *data);
        MOZ_ASSERT(status);
        if (!status) {
          return false;
//......... (remainder of code omitted) .........
Developer: MekliCZ, Project: positron, Lines: 101, Source: ImageClient.cpp

Example 6: autoLock

bool
ImageClientSingle::UpdateImage(ImageContainer* aContainer, uint32_t aContentFlags)
{
  AutoLockImage autoLock(aContainer);

  Image *image = autoLock.GetImage();
  if (!image) {
    return false;
  }

  // Don't try to update to an invalid image. We return true because the caller
  // would attempt to recreate the ImageClient otherwise, and that isn't going
  // to help.
  if (!image->IsValid()) {
    return true;
  }

  if (mLastPaintedImageSerial == image->GetSerial()) {
    return true;
  }

  RefPtr<TextureClient> texture = image->GetTextureClient(this);

  AutoRemoveTexture autoRemoveTexture(this);
  if (texture != mFrontBuffer) {
    autoRemoveTexture.mTexture = mFrontBuffer;
    mFrontBuffer = nullptr;
  }

  if (!texture) {
    // Slow path, we should not be hitting it very often and if we do it means
    // we are using an Image class that is not backed by textureClient and we
    // should fix it.
    if (image->GetFormat() == ImageFormat::PLANAR_YCBCR) {
      PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);
      const PlanarYCbCrData* data = ycbcr->GetData();
      if (!data) {
        return false;
      }
      texture = TextureClient::CreateForYCbCr(GetForwarder(),
        data->mYSize, data->mCbCrSize, data->mStereoMode,
        TextureFlags::DEFAULT | mTextureFlags
      );
      if (!texture || !texture->Lock(OpenMode::OPEN_WRITE_ONLY)) {
        return false;
      }
      bool status = texture->AsTextureClientYCbCr()->UpdateYCbCr(*data);
      MOZ_ASSERT(status);

      texture->Unlock();
      if (!status) {
        return false;
      }

    } else if (image->GetFormat() == ImageFormat::SURFACE_TEXTURE ||
               image->GetFormat() == ImageFormat::EGLIMAGE) {
      gfx::IntSize size = image->GetSize();

      if (image->GetFormat() == ImageFormat::EGLIMAGE) {
        EGLImageImage* typedImage = static_cast<EGLImageImage*>(image);
        texture = new EGLImageTextureClient(GetForwarder(),
                                            mTextureFlags,
                                            typedImage,
                                            size);
#ifdef MOZ_WIDGET_ANDROID
      } else if (image->GetFormat() == ImageFormat::SURFACE_TEXTURE) {
        SurfaceTextureImage* typedImage = static_cast<SurfaceTextureImage*>(image);
        const SurfaceTextureImage::Data* data = typedImage->GetData();
        texture = new SurfaceTextureClient(GetForwarder(), mTextureFlags,
                                           data->mSurfTex, size,
                                           data->mOriginPos);
#endif
      } else {
        MOZ_ASSERT(false, "Bad ImageFormat.");
      }
    } else {
      RefPtr<gfx::SourceSurface> surface = image->GetAsSourceSurface();
      MOZ_ASSERT(surface);
      texture = CreateTextureClientForDrawing(surface->GetFormat(), image->GetSize(),
                                              gfx::BackendType::NONE, mTextureFlags);
      if (!texture) {
        return false;
      }

      MOZ_ASSERT(texture->CanExposeDrawTarget());

      if (!texture->Lock(OpenMode::OPEN_WRITE_ONLY)) {
        return false;
      }

      {
        // We must not keep a reference to the DrawTarget after it has been unlocked.
        DrawTarget* dt = texture->BorrowDrawTarget();
        if (!dt) {
          gfxWarning() << "ImageClientSingle::UpdateImage failed in BorrowDrawTarget";
          return false;
        }
        MOZ_ASSERT(surface.get());
        dt->CopySurface(surface, IntRect(IntPoint(), surface->GetSize()), IntPoint());
      }
//......... (remainder of code omitted) .........
Developer: lgarner, Project: mozilla-central, Lines: 101, Source: ImageClient.cpp

Example 7: NS_ERROR

VideoData* VideoData::Create(nsVideoInfo& aInfo,
                             ImageContainer* aContainer,
                             PRInt64 aOffset,
                             PRInt64 aTime,
                             PRInt64 aEndTime,
                             const YCbCrBuffer& aBuffer,
                             bool aKeyframe,
                             PRInt64 aTimecode,
                             nsIntRect aPicture)
{
  if (!aContainer) {
    return nsnull;
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nsnull;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nsnull;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) || !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nsnull;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  PRUint32 xLimit;
  PRUint32 yLimit;
  if (!AddOverflow32(aPicture.x, aPicture.width, xLimit) ||
      xLimit > aBuffer.mPlanes[0].mStride ||
      !AddOverflow32(aPicture.y, aPicture.height, yLimit) ||
      yLimit > aBuffer.mPlanes[0].mHeight)
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nsnull;
  }

  nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                       aTime,
                                       aEndTime,
                                       aKeyframe,
                                       aTimecode,
                                       aInfo.mDisplay));
  // Currently our decoder only knows how to output to PLANAR_YCBCR
  // format.
  Image::Format format = Image::PLANAR_YCBCR;
  v->mImage = aContainer->CreateImage(&format, 1);
  if (!v->mImage) {
    return nsnull;
  }
  NS_ASSERTION(v->mImage->GetFormat() == Image::PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());

  PlanarYCbCrImage::Data data;
  data.mYChannel = aBuffer.mPlanes[0].mData;
  data.mYSize = gfxIntSize(aBuffer.mPlanes[0].mWidth, aBuffer.mPlanes[0].mHeight);
  data.mYStride = aBuffer.mPlanes[0].mStride;
  data.mCbChannel = aBuffer.mPlanes[1].mData;
  data.mCrChannel = aBuffer.mPlanes[2].mData;
  data.mCbCrSize = gfxIntSize(aBuffer.mPlanes[1].mWidth, aBuffer.mPlanes[1].mHeight);
  data.mCbCrStride = aBuffer.mPlanes[1].mStride;
  data.mPicX = aPicture.x;
  data.mPicY = aPicture.y;
  data.mPicSize = gfxIntSize(aPicture.width, aPicture.height);
  data.mStereoMode = aInfo.mStereoMode;

  videoImage->SetData(data); // Copies buffer
  return v.forget();
}
Developer: ehsan, Project: mozilla-history, Lines: 81, Source: nsBuiltinDecoderReader.cpp
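
Note: AddOverflow32 is a helper used above to guard the picture-rect arithmetic against 32-bit overflow (Example 13 shows a later version of the same function built on CheckedUint32 instead). A minimal sketch of the assumed contract — return true and store the sum on success, return false if the addition would wrap:

// Hypothetical sketch: add two values as 32-bit unsigned integers, storing
// the sum in aResult and returning false if it does not fit in 32 bits.
// Callers are assumed to pass non-negative coordinates.
static bool AddOverflow32(PRUint32 a, PRUint32 b, PRUint32& aResult)
{
  PRUint64 sum = PRUint64(a) + PRUint64(b);
  if (sum > PR_UINT32_MAX) {
    return false;
  }
  aResult = PRUint32(sum);
  return true;
}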

Example 8: GetContainer

void
ImageLayerOGL::RenderLayer(int,
                           const nsIntPoint& aOffset)
{
  nsRefPtr<ImageContainer> container = GetContainer();

  if (!container || mOGLManager->CompositingDisabled())
    return;

  mOGLManager->MakeCurrent();

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  NS_ASSERTION(image->GetFormat() != REMOTE_IMAGE_BITMAP,
    "Remote images aren't handled yet in OGL layers!");

  if (image->GetFormat() == PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage =
      static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->IsValid()) {
      return;
    }

    PlanarYCbCrOGLBackendData *data =
      static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTextures->GetGLContext() != gl()) {
      // If these textures were allocated by another layer manager,
      // clear them out and re-allocate below.
      data = nullptr;
      yuvImage->SetBackendData(LAYERS_OPENGL, nullptr);
    }

    if (!data) {
      AllocateTexturesYCbCr(yuvImage);
      data = static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));
    }

    if (!data || data->mTextures->GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();
    gl()->fActiveTexture(LOCAL_GL_TEXTURE2);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[2].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE1);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[1].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[0].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    
    ShaderProgramOGL *program = mOGLManager->GetProgram(YCbCrLayerProgramType,
                                                        GetMaskLayer());

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        yuvImage->GetSize().width,
                                        yuvImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetYCbCrTextureUnits(0, 1, 2);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                yuvImage->GetData()->GetPictureRect(),
                                                nsIntSize(yuvImage->GetData()->mYSize.width,
                                                          yuvImage->GetData()->mYSize.height));

    // We shouldn't need to do this, but do it anyway just in case
    // someone else forgets.
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
  } else if (image->GetFormat() == CAIRO_SURFACE) {
    CairoImage *cairoImage =
      static_cast<CairoImage*>(image);

    if (!cairoImage->mSurface) {
      return;
    }

    NS_ASSERTION(cairoImage->mSurface->GetContentType() != gfxASurface::CONTENT_ALPHA,
                 "Image layer has alpha image");

    CairoOGLBackendData *data =
      static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTexture.GetGLContext() != gl()) {
      // If this texture was allocated by another layer manager, clear
      // it out and re-allocate below.
      data = nullptr;
      cairoImage->SetBackendData(LAYERS_OPENGL, nullptr);
//......... (remainder of code omitted) .........
Developer: memorais, Project: TC2_impl, Lines: 101, Source: ImageLayerOGL.cpp

Example 9: avcodec_get_edge_width

int
FFmpegH264Decoder::AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  // Older versions of ffmpeg require that edges be allocated *around* the
  // actual image.
  int edgeWidth = avcodec_get_edge_width();
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  // Align width and height to possibly speed up decode.
  int stride_align[AV_NUM_DATA_POINTERS];
  avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                            stride_align);

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = reinterpret_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  mozilla::layers::PlanarYCbCrData data;
  PlanarYCbCrDataFromAVFrame(data, aFrame);
  ycbcr->SetDataNoCopy(data);

  mCurrentImage.swap(image);

  return 0;
}
Developer: Wrichik1999, Project: gecko-dev, Lines: 66, Source: FFmpegH264Decoder.cpp
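
Note: PlanarYCbCrDataFromAVFrame is not shown in this snippet. A plausible sketch of how it could map the AVFrame plane pointers and strides onto PlanarYCbCrData for a YUV420P frame — the field assignments below are an assumption, not the actual Gecko implementation:

// Hypothetical sketch: wrap an FFmpeg YUV420P AVFrame in a PlanarYCbCrData
// descriptor without copying. Assumes aFrame->data/linesize were filled in
// by AllocateYUV420PVideoBuffer above.
static void
PlanarYCbCrDataFromAVFrame(mozilla::layers::PlanarYCbCrData& aData,
                           AVFrame* aFrame)
{
  aData.mYChannel = aFrame->data[0];
  aData.mYStride = aFrame->linesize[0];
  aData.mYSize = gfx::IntSize(aFrame->width, aFrame->height);

  aData.mCbChannel = aFrame->data[1];
  aData.mCrChannel = aFrame->data[2];
  aData.mCbCrStride = aFrame->linesize[1];
  // 4:2:0 chroma planes are half the luma resolution, rounded up.
  aData.mCbCrSize = gfx::IntSize((aFrame->width + 1) / 2,
                                 (aFrame->height + 1) / 2);

  aData.mPicX = 0;
  aData.mPicY = 0;
  aData.mPicSize = gfx::IntSize(aFrame->width, aFrame->height);
}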

Example 10: GetContainer

void
ImageLayerD3D10::RenderLayer()
{
  ImageContainer *container = GetContainer();
  if (!container) {
    return;
  }

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  gfxIntSize size = mScaleMode == SCALE_NONE ? image->GetSize() : mScaleToSize;

  SetEffectTransformAndOpacity();

  ID3D10EffectTechnique *technique;

  if (image->GetFormat() == Image::CAIRO_SURFACE || image->GetFormat() == Image::REMOTE_IMAGE_BITMAP)
  {
    bool hasAlpha = false;

    nsRefPtr<ID3D10ShaderResourceView> srView = GetImageSRView(image, hasAlpha);
    if (!srView) {
      return;
    }
    
    PRUint8 shaderFlags = SHADER_PREMUL;
    shaderFlags |= LoadMaskTexture();
    shaderFlags |= hasAlpha
                  ? SHADER_RGBA : SHADER_RGB;
    shaderFlags |= mFilter == gfxPattern::FILTER_NEAREST
                  ? SHADER_POINT : SHADER_LINEAR;
    technique = SelectShader(shaderFlags);


    effect()->GetVariableByName("tRGB")->AsShaderResource()->SetResource(srView);

    effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)0,
        (float)0,
        (float)size.width,
        (float)size.height)
      );
  } else if (image->GetFormat() == Image::PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage =
      static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->mBufferSize) {
      return;
    }

    if (!yuvImage->GetBackendData(LayerManager::LAYERS_D3D10)) {
      AllocateTexturesYCbCr(yuvImage);
    }

    PlanarYCbCrD3D10BackendData *data =
      static_cast<PlanarYCbCrD3D10BackendData*>(yuvImage->GetBackendData(LayerManager::LAYERS_D3D10));

    if (!data) {
      return;
    }

    nsRefPtr<ID3D10Device> dev;
    data->mYTexture->GetDevice(getter_AddRefs(dev));
    if (dev != device()) {
      return;
    }

    // TODO: At some point we should try to deal with mFilter here, you don't
    // really want to use point filtering in the case of NEAREST, since that
    // would also use point filtering for Chroma upsampling. Where most likely
    // the user would only want point filtering for final RGB image upsampling.

    technique = SelectShader(SHADER_YCBCR | LoadMaskTexture());

    effect()->GetVariableByName("tY")->AsShaderResource()->SetResource(data->mYView);
    effect()->GetVariableByName("tCb")->AsShaderResource()->SetResource(data->mCbView);
    effect()->GetVariableByName("tCr")->AsShaderResource()->SetResource(data->mCrView);

    /*
     * Send 3d control data and metadata to NV3DVUtils
     */
    if (GetNv3DVUtils()) {
      Nv_Stereo_Mode mode;
      switch (yuvImage->mData.mStereoMode) {
      case STEREO_MODE_LEFT_RIGHT:
        mode = NV_STEREO_MODE_LEFT_RIGHT;
        break;
      case STEREO_MODE_RIGHT_LEFT:
        mode = NV_STEREO_MODE_RIGHT_LEFT;
        break;
      case STEREO_MODE_BOTTOM_TOP:
        mode = NV_STEREO_MODE_BOTTOM_TOP;
        break;
      case STEREO_MODE_TOP_BOTTOM:
//......... (remainder of code omitted) .........
Developer: almet, Project: mozilla-central, Lines: 101, Source: ImageLayerD3D10.cpp

Example 11: autoLock

bool
ImageClientSingle::UpdateImage(ImageContainer* aContainer,
                               uint32_t aContentFlags)
{
  AutoLockImage autoLock(aContainer);

  Image *image = autoLock.GetImage();
  if (!image) {
    return false;
  }

  if (mLastPaintedImageSerial == image->GetSerial()) {
    return true;
  }

  if (image->AsSharedImage() && image->AsSharedImage()->GetTextureClient()) {
    // fast path: no need to allocate and/or copy image data
    RefPtr<TextureClient> texture = image->AsSharedImage()->GetTextureClient();

    if (texture->IsSharedWithCompositor()) {
      // XXX - temporary fix for bug 911941
      // This will be changed with bug 912907
      return false;
    }

    if (mFrontBuffer) {
      RemoveTextureClient(mFrontBuffer);
    }
    mFrontBuffer = texture;
    AddTextureClient(texture);
    GetForwarder()->UpdatedTexture(this, texture, nullptr);
    GetForwarder()->UseTexture(this, texture);
  } else if (image->GetFormat() == PLANAR_YCBCR) {
    PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image);
    const PlanarYCbCrData* data = ycbcr->GetData();
    if (!data) {
      return false;
    }

    if (mFrontBuffer && mFrontBuffer->IsImmutable()) {
      RemoveTextureClient(mFrontBuffer);
      mFrontBuffer = nullptr;
    }

    bool bufferCreated = false;
    if (!mFrontBuffer) {
      mFrontBuffer = CreateBufferTextureClient(gfx::FORMAT_YUV, TEXTURE_FLAGS_DEFAULT);
      gfx::IntSize ySize(data->mYSize.width, data->mYSize.height);
      gfx::IntSize cbCrSize(data->mCbCrSize.width, data->mCbCrSize.height);
      if (!mFrontBuffer->AsTextureClientYCbCr()->AllocateForYCbCr(ySize, cbCrSize, data->mStereoMode)) {
        mFrontBuffer = nullptr;
        return false;
      }
      bufferCreated = true;
    }

    if (!mFrontBuffer->Lock(OPEN_READ_WRITE)) {
      return false;
    }
    bool status = mFrontBuffer->AsTextureClientYCbCr()->UpdateYCbCr(*data);
    mFrontBuffer->Unlock();

    if (bufferCreated) {
      AddTextureClient(mFrontBuffer);
    }

    if (status) {
      GetForwarder()->UpdatedTexture(this, mFrontBuffer, nullptr);
      GetForwarder()->UseTexture(this, mFrontBuffer);
    } else {
      MOZ_ASSERT(false);
      return false;
    }

  } else if (image->GetFormat() == SHARED_TEXTURE) {
    SharedTextureImage* sharedImage = static_cast<SharedTextureImage*>(image);
    const SharedTextureImage::Data *data = sharedImage->GetData();
    gfx::IntSize size = gfx::IntSize(image->GetSize().width, image->GetSize().height);

    if (mFrontBuffer) {
      RemoveTextureClient(mFrontBuffer);
      mFrontBuffer = nullptr;
    }

    RefPtr<SharedTextureClientOGL> buffer = new SharedTextureClientOGL(mTextureFlags);
    buffer->InitWith(data->mHandle, size, data->mShareType, data->mInverted);
    mFrontBuffer = buffer;

    AddTextureClient(mFrontBuffer);
    GetForwarder()->UseTexture(this, mFrontBuffer);
  } else {
    nsRefPtr<gfxASurface> surface = image->GetAsSurface();
    MOZ_ASSERT(surface);

    gfx::IntSize size = gfx::IntSize(image->GetSize().width, image->GetSize().height);

    if (mFrontBuffer &&
        (mFrontBuffer->IsImmutable() || mFrontBuffer->GetSize() != size)) {
      RemoveTextureClient(mFrontBuffer);
      mFrontBuffer = nullptr;
//......... (remainder of code omitted) .........
Developer: jalbertbowden, Project: mozilla-central, Lines: 101, Source: ImageClient.cpp

Example 12: DOM_CAMERA_LOGI

void
GonkCameraPreview::ReceiveFrame(PRUint8 *aData, PRUint32 aLength)
{
  DOM_CAMERA_LOGI("%s:%d : this=%p\n", __func__, __LINE__, this);

  if (mInput->HaveEnoughBuffered(TRACK_VIDEO)) {
    if (mDiscardedFrameCount == 0) {
      DOM_CAMERA_LOGI("mInput has enough data buffered, starting to discard\n");
    }
    ++mDiscardedFrameCount;
    return;
  } else if (mDiscardedFrameCount) {
    DOM_CAMERA_LOGI("mInput needs more data again; discarded %d frames in a row\n", mDiscardedFrameCount);
    mDiscardedFrameCount = 0;
  }

  switch (mFormat) {
    case GonkCameraHardware::PREVIEW_FORMAT_YUV420SP:
      {
        // de-interlace the u and v planes
        uint8_t* y = aData;
        uint32_t yN = mWidth * mHeight;

        NS_ASSERTION((yN & 0x3) == 0, "Invalid image dimensions!");

        uint32_t uvN = yN / 4;
        uint32_t* src = (uint32_t*)( y + yN );
        uint32_t* d = new uint32_t[ uvN / 2 ];
        uint32_t* u = d;
        uint32_t* v = u + uvN / 4;

        // we're handling pairs of 32-bit words, so divide by 8
        NS_ASSERTION((uvN & 0x7) == 0, "Invalid image dimensions!");
        uvN /= 8;

        while (uvN--) {
          uint32_t src0 = *src++;
          uint32_t src1 = *src++;

          uint32_t u0;
          uint32_t v0;
          uint32_t u1;
          uint32_t v1;

          DEINTERLACE( u0, v0, src0, src1 );

          src0 = *src++;
          src1 = *src++;

          DEINTERLACE( u1, v1, src0, src1 );

          *u++ = u0;
          *u++ = u1;
          *v++ = v0;
          *v++ = v1;
        }

        memcpy(y + yN, d, yN / 2);
        delete[] d;
      }
      break;

    case GonkCameraHardware::PREVIEW_FORMAT_YUV420P:
      // no transformation required
      break;

    default:
      // in a format we don't handle, get out of here
      return;
  }

  Image::Format format = Image::PLANAR_YCBCR;
  nsRefPtr<Image> image = mImageContainer->CreateImage(&format, 1);
  image->AddRef();
  PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(image.get());

  /**
   * If you change either lumaBpp or chromaBpp, make sure the
   * assertions below still hold.
   */
  const PRUint8 lumaBpp = 8;
  const PRUint8 chromaBpp = 4;
  PlanarYCbCrImage::Data data;
  data.mYChannel = aData;
  data.mYSize = gfxIntSize(mWidth, mHeight);

  data.mYStride = mWidth * lumaBpp;
  NS_ASSERTION((data.mYStride & 0x7) == 0, "Invalid image dimensions!");
  data.mYStride /= 8;

  data.mCbCrStride = mWidth * chromaBpp;
  NS_ASSERTION((data.mCbCrStride & 0x7) == 0, "Invalid image dimensions!");
  data.mCbCrStride /= 8;

  data.mCbChannel = aData + mHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + mHeight * data.mCbCrStride / 2;
  data.mCbCrSize = gfxIntSize(mWidth / 2, mHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = gfxIntSize(mWidth, mHeight);
//......... (remainder of code omitted) .........
Developer: eric30, Project: mozilla-central-1, Lines: 101, Source: GonkCameraPreview.cpp
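
Note: the DEINTERLACE macro is defined elsewhere in GonkCameraPreview.cpp. Conceptually it splits two 32-bit words holding eight interleaved chroma bytes into one word of four U bytes and one word of four V bytes. A hedged little-endian sketch, assuming the interleaved order is u0 v0 u1 v1 ... (the real macro, and the actual byte order on the camera path, may differ):

// Hypothetical sketch: s0 holds bytes u0 v0 u1 v1 and s1 holds u2 v2 u3 v3
// (little-endian). Pack the four U bytes into u and the four V bytes into v.
#define DEINTERLACE(u, v, s0, s1)                                      \
  do {                                                                 \
    (u) = ((s0) & 0x000000ff)         | (((s0) & 0x00ff0000) >> 8) |   \
          (((s1) & 0x000000ff) << 16) | (((s1) & 0x00ff0000) << 8);    \
    (v) = (((s0) & 0x0000ff00) >> 8)  | (((s0) & 0xff000000) >> 16) |  \
          (((s1) & 0x0000ff00) << 8)  | ((s1) & 0xff000000);           \
  } while (0)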

Example 13: v

/* static */
already_AddRefed<VideoData>
VideoData::Create(const VideoInfo& aInfo,
                  ImageContainer* aContainer,
                  Image* aImage,
                  int64_t aOffset,
                  int64_t aTime,
                  int64_t aDuration,
                  const YCbCrBuffer& aBuffer,
                  bool aKeyframe,
                  int64_t aTimecode,
                  const IntRect& aPicture)
{
  if (!aImage && !aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    RefPtr<VideoData> v(new VideoData(aOffset,
                                        aTime,
                                        aDuration,
                                        aKeyframe,
                                        aTimecode,
                                        aInfo.mDisplay,
                                        0));
    return v.forget();
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nullptr;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    // In debug mode, makes the error more noticeable
    MOZ_ASSERT(false, "Empty picture rect");
    return nullptr;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) || !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || xLimit.value() > aBuffer.mPlanes[0].mStride ||
      !yLimit.isValid() || yLimit.value() > aBuffer.mPlanes[0].mHeight)
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  RefPtr<VideoData> v(new VideoData(aOffset,
                                      aTime,
                                      aDuration,
                                      aKeyframe,
                                      aTimecode,
                                      aInfo.mDisplay,
                                      0));
#ifdef MOZ_WIDGET_GONK
  const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
  const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
  const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
#endif

  if (!aImage) {
    // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
    // format.
#ifdef MOZ_WIDGET_GONK
    if (IsYV12Format(Y, Cb, Cr) && !IsInEmulator()) {
      v->mImage = new layers::GrallocImage();
    }
#endif
    if (!v->mImage) {
      v->mImage = aContainer->CreatePlanarYCbCrImage();
    }
  } else {
    v->mImage = aImage;
  }

  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == ImageFormat::PLANAR_YCBCR ||
               v->mImage->GetFormat() == ImageFormat::GRALLOC_PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage = v->mImage->AsPlanarYCbCrImage();
  MOZ_ASSERT(videoImage);

  bool shouldCopyData = (aImage == nullptr);
  if (!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
                                      shouldCopyData)) {
    return nullptr;
//......... (remainder of code omitted) .........
Developer: cliqz-oss, Project: browser-f, Lines: 101, Source: MediaData.cpp

Example 14: avcodec_get_edge_width

int
FFmpegH264Decoder<LIBAV_VER>::AllocateYUV420PVideoBuffer(
  AVCodecContext* aCodecContext, AVFrame* aFrame)
{
  bool needAlign = aCodecContext->codec->capabilities & CODEC_CAP_DR1;
  int edgeWidth =  needAlign ? avcodec_get_edge_width() : 0;
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  // Make sure the decodeWidth is a multiple of 32, so a UV plane stride will be
  // a multiple of 16. FFmpeg uses SSE2 accelerated code to copy a frame line by
  // line.
  decodeWidth = (decodeWidth + 31) & ~31;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  if (needAlign) {
    // Align width and height to account for CODEC_FLAG_EMU_EDGE.
    int stride_align[AV_NUM_DATA_POINTERS];
    avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                              stride_align);
  }

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize + 64);
  // FFmpeg requires a 16/32 bytes-aligned buffer, align it on 64 to be safe
  buffer = reinterpret_cast<uint8_t*>((reinterpret_cast<uintptr_t>(buffer) + 63) & ~63);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  aFrame->opaque = static_cast<void*>(image.forget().take());

  return 0;
}
Developer: norihirou, Project: gecko-dev, Lines: 69, Source: FFmpegH264Decoder.cpp

Example 15: avcodec_get_edge_width

int
FFmpegH264Decoder<LIBAV_VER>::AllocateYUV420PVideoBuffer(
  AVCodecContext* aCodecContext, AVFrame* aFrame)
{
  bool needAlign = aCodecContext->codec->capabilities & CODEC_CAP_DR1;
  bool needEdge = !(aCodecContext->flags & CODEC_FLAG_EMU_EDGE);
  int edgeWidth = needEdge ? avcodec_get_edge_width() : 0;

  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  if (needAlign) {
    // Align width and height to account for CODEC_CAP_DR1.
    // Make sure the decodeWidth is a multiple of 64, so a UV plane stride will be
    // a multiple of 32. FFmpeg uses SSE3 accelerated code to copy a frame line by
    // line.
    // VP9 decoder uses MOVAPS/VEX.256 which requires 32-bytes aligned memory.
    decodeWidth = (decodeWidth + 63) & ~63;
    decodeHeight = (decodeHeight + 63) & ~63;
  }

  PodZero(&aFrame->data[0], AV_NUM_DATA_POINTERS);
  PodZero(&aFrame->linesize[0], AV_NUM_DATA_POINTERS);

  int pitch = decodeWidth;
  int chroma_pitch = (pitch + 1) / 2;
  int chroma_height = (decodeHeight + 1) / 2;

  // Get strides for each plane.
  aFrame->linesize[0] = pitch;
  aFrame->linesize[1] = aFrame->linesize[2] = chroma_pitch;

  size_t allocSize = pitch * decodeHeight + (chroma_pitch * chroma_height) * 2;

  RefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize + 64);
  // FFmpeg requires a 16/32 bytes-aligned buffer, align it on 64 to be safe
  buffer = reinterpret_cast<uint8_t*>((reinterpret_cast<uintptr_t>(buffer) + 63) & ~63);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  int offsets[3] = {
    0,
    pitch * decodeHeight,
    pitch * decodeHeight + chroma_pitch * chroma_height };

  // Add a horizontal bar |edgeWidth| pixels high at the
  // top of the frame, plus |edgeWidth| pixels from the left of the frame.
  int planesEdgeWidth[3] = {
    edgeWidth * aFrame->linesize[0] + edgeWidth,
    edgeWidth / 2 * aFrame->linesize[1] + edgeWidth / 2,
    edgeWidth / 2 * aFrame->linesize[2] + edgeWidth / 2 };

  for (uint32_t i = 0; i < 3; i++) {
    aFrame->data[i] = buffer + offsets[i] + planesEdgeWidth[i];
  }

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  aFrame->opaque = static_cast<void*>(image.forget().take());

  aFrame->type = FF_BUFFER_TYPE_USER;
  aFrame->reordered_opaque = aCodecContext->reordered_opaque;
#if LIBAVCODEC_VERSION_MAJOR == 53
  if (aCodecContext->pkt) {
    aFrame->pkt_pts = aCodecContext->pkt->pts;
  }
#endif
  return 0;
}
Developer: 70599, Project: Waterfox, Lines: 77, Source: FFmpegH264Decoder.cpp
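
Note: examples 14 and 15 both round the buffer address up with "(addr + 63) & ~63". This is the standard power-of-two alignment trick: add (alignment - 1), then clear the low bits. A standalone illustration (the names here are illustrative, not from the source):

#include <cassert>
#include <cstdint>

// Round value up to the next multiple of align, where align is a power of 2.
static uintptr_t AlignUp(uintptr_t value, uintptr_t align)
{
  return (value + (align - 1)) & ~(align - 1);
}

int main()
{
  assert(AlignUp(100, 64) == 128); // rounds up to the next multiple of 64
  assert(AlignUp(128, 64) == 128); // already-aligned values are unchanged
  return 0;
}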


Note: The PlanarYCbCrImage class examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's license before using or redistributing the code, and do not repost without permission.