本文整理汇总了C++中ImageFrame类的典型用法代码示例。如果您正苦于以下问题:C++ ImageFrame类的具体用法?C++ ImageFrame怎么用?C++ ImageFrame使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ImageFrame类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: lock
// Decodes the image at its full size, inserts the result into the
// ImageDecodingStore cache (locked), and scales it down to |scaledSize|
// if a smaller size was requested. Returns 0 when no frame can be
// decoded yet (e.g. not enough data has arrived).
const ScaledImageFragment* ImageFrameGenerator::tryToDecodeAndScale(const SkISize& scaledSize)
{
RefPtr<SharedBuffer> data;
bool allDataReceived = false;
{
// Snapshot the encoded data under the mutex so the (potentially slow)
// decode below can run without holding the lock.
MutexLocker lock(m_dataMutex);
// FIXME: We should do a shallow copy instead. Now we're restricted by the API of SharedBuffer.
data = m_data->copy();
allDataReceived = m_allDataReceived;
}
// Sniff the data for a suitable decoder; fall back to the injected
// factory (used by tests) when sniffing fails.
OwnPtr<ImageDecoder> decoder(adoptPtr(ImageDecoder::create(*data.get(), ImageSource::AlphaPremultiplied, ImageSource::GammaAndColorProfileApplied)));
if (!decoder && m_imageDecoderFactory)
decoder = m_imageDecoderFactory->create();
if (!decoder)
return 0;
decoder->setData(data.get(), allDataReceived);
// Only the first frame is decoded here.
ImageFrame* frame = decoder->frameBufferAtIndex(0);
if (!frame || frame->status() == ImageFrame::FrameEmpty)
return 0;
// A partially decoded frame is still cached; |isComplete| tells the
// cache whether it may need to be re-decoded later.
bool isComplete = frame->status() == ImageFrame::FrameComplete;
SkBitmap fullSizeBitmap = frame->getSkBitmap();
ASSERT(fullSizeBitmap.width() == m_fullSize.width() && fullSizeBitmap.height() == m_fullSize.height());
// Insert the full-size bitmap into the cache and keep it locked so it
// cannot be evicted while we use or return it.
const ScaledImageFragment* fullSizeImage = ImageDecodingStore::instance()->insertAndLockCache(
this, ScaledImageFragment::create(m_fullSize, fullSizeBitmap, isComplete));
if (m_fullSize == scaledSize)
return fullSizeImage;
return tryToScale(fullSizeImage, scaledSize);
}
示例2: outputRows
// Drains every remaining scanline from the JPEG reader into |buffer|,
// converting each sample through setPixel<colorSpace>() and applying the
// optional QCMS color transform for RGB output. Returns false as soon as
// libjpeg fails to deliver a requested scanline.
template <J_COLOR_SPACE colorSpace> bool outputRows(JPEGImageReader* reader, ImageFrame& buffer)
{
    JSAMPARRAY rows = reader->samples();
    jpeg_decompress_struct* info = reader->info();
    const int rowWidth = info->output_width;
    while (info->output_scanline < info->output_height) {
        // jpeg_read_scanlines advances output_scanline, so capture the
        // destination row index first.
        const int destY = info->output_scanline;
        // Request exactly one scanline; anything other than 1 means failure.
        if (jpeg_read_scanlines(info, rows, 1) != 1)
            return false;
#if USE(QCMSLIB)
        if (reader->colorTransform() && colorSpace == JCS_RGB)
            qcms_transform_data(reader->colorTransform(), *rows, *rows, rowWidth);
#endif
        ImageFrame::PixelData* destPixel = buffer.getAddr(0, destY);
        for (int destX = 0; destX < rowWidth; ++destX, ++destPixel)
            setPixel<colorSpace>(buffer, destPixel, rows, destX);
    }
    buffer.setPixelsChanged(true);
    return true;
}
示例3: prepareLazyDecodedFrames
// Returns an SkImage for frame |index|. Prefers the lazily-decoded path
// (ImageFrameGenerator) when frame metadata is available; otherwise falls
// back to the actual decoder. Returns nullptr on failure or when the
// frame has no pixels yet.
sk_sp<SkImage> DeferredImageDecoder::createFrameAtIndex(size_t index) {
// Once a deferred decode has failed, never retry.
if (m_frameGenerator && m_frameGenerator->decodeFailed())
return nullptr;
prepareLazyDecodedFrames();
if (index < m_frameData.size()) {
DeferredFrameData* frameData = &m_frameData[index];
// Record this frame's byte size for cache accounting; estimate from the
// image area when no actual decoder is present.
if (m_actualDecoder)
frameData->m_frameBytes = m_actualDecoder->frameBytesAtIndex(index);
else
frameData->m_frameBytes = m_size.area() * sizeof(ImageFrame::PixelData);
// ImageFrameGenerator has the latest known alpha state. There will be a
// performance boost if this frame is opaque.
DCHECK(m_frameGenerator);
return createFrameImageAtIndex(index, !m_frameGenerator->hasAlpha(index));
}
// Non-lazy fallback: decode synchronously through the real decoder.
if (!m_actualDecoder || m_actualDecoder->failed())
return nullptr;
ImageFrame* frame = m_actualDecoder->frameBufferAtIndex(index);
if (!frame || frame->getStatus() == ImageFrame::FrameEmpty)
return nullptr;
// A complete frame can hand over its pixels; a partial one must be
// snapshotted from the mutable bitmap.
return (frame->getStatus() == ImageFrame::FrameComplete)
? frame->finalizePixelsAndGetImage()
: SkImage::MakeFromBitmap(frame->bitmap());
}
示例4: wxFrame
// Opens one ImageFrame window for each micro-photo associated with
// |itemSelected|, cascading the windows relative to |parent|'s screen
// position. |nbImg| is the running cascade offset and is advanced by the
// number of images shown so successive calls keep cascading.
ListesAuxShowListeImages::ListesAuxShowListeImages(wxWindow *parent, wxString itemSelected, int &nbImg)
    : wxFrame(parent, wxID_ANY, wxEmptyString) {
    try {
        // Stack allocation: the original heap-allocated this helper with
        // `new` and never deleted it, leaking it on every invocation.
        ObjPhotoGenealogy objPhotoGenealogy;
        vector<wxImage> imgVect = model->arrayImageMicroFromNomLatin(itemSelected);
        wxArrayInt idVect = model->idImageMicroArrayFromNomLatin(itemSelected);
        // The parent geometry is loop-invariant; compute it once.
        const wxRect rect = parent->GetScreenRect();
        const int x0 = rect.x;
        const int y0 = rect.y;
        const int w0 = rect.width;
        for (size_t ijk = 0; ijk < imgVect.size(); ijk++) {
            wxImage imgAux = imgVect[ijk];
            wxString title = objPhotoGenealogy.getFullNomFromIdPhoto(idVect[ijk]);
            // Cascade each new frame 16px right and down from the previous one.
            const int xPos = x0 + w0 + x0 + (nbImg + (int)ijk) * 16;
            const int yPos = y0 + (nbImg + (int)ijk) * 16;
            // Top-level wxFrame: ownership passes to wxWidgets, which destroys
            // it when the window is closed.
            ImageFrame *imageFrame = new ImageFrame(parent, imgAux, title, wxPoint(xPos, yPos), wxDEFAULT_FRAME_STYLE);
            imageFrame->SetBackgroundColour(Couleurs::backgColor);
            imageFrame->Show();
        }
        nbImg = nbImg + (int)imgVect.size();
    } catch (const exception &e) {
        Aux::logsStr("", e.what(), logPut);
    }
}
示例5: frameHasAlphaAtIndex
// Reports whether frame |index| may contain transparent pixels.
// Conservatively answers true ("has alpha") whenever the answer cannot be
// determined from the data decoded so far.
bool ImageSource::frameHasAlphaAtIndex(size_t index)
{
#ifdef ANDROID_ANIMATED_GIF
// Animated GIF path: ask the per-frame buffer directly.
if (m_decoder.m_gifDecoder) {
ImageFrame* buffer =
m_decoder.m_gifDecoder->frameBufferAtIndex(index);
if (!buffer || buffer->status() == ImageFrame::FrameEmpty)
return false;
return buffer->hasAlpha();
}
#else
// Without animated GIF support only a single frame exists.
SkASSERT(0 == index);
#endif
if (NULL == m_decoder.m_image)
return true; // if we're not sure, assume the worst case
const PrivateAndroidImageSourceRec& decoder = *m_decoder.m_image;
// A 565 bitmap has no alpha channel, so we can answer even before all
// data has arrived.
if (decoder.bitmap().getConfig() == SkBitmap::kRGB_565_Config)
return false;
if (!decoder.fAllDataReceived)
return true; // if we're not sure, assume the worst case
return !decoder.bitmap().isOpaque();
}
示例6: applyColorProfile
// Applies the embedded color profile to the rows decoded since the last
// call, writing the transformed pixels back into |buffer|. Incremental:
// m_decodedHeight tracks how far previous calls got.
void WEBPImageDecoder::applyColorProfile(const uint8_t* data, size_t size, ImageFrame& buffer)
{
int width;
int decodedHeight;
if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
if (decodedHeight <= 0)
return;
// Lazily parse the color profile out of the container the first time
// any rows are available.
if (!m_haveReadProfile) {
readColorProfile(data, size);
m_haveReadProfile = true;
}
ASSERT(width == scaledSize().width());
ASSERT(decodedHeight <= scaledSize().height());
// Only transform the rows that are newly decoded since the last call.
for (int y = m_decodedHeight; y < decodedHeight; ++y) {
uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(0, y));
if (qcms_transform* transform = colorTransform())
qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX);
// Re-write each pixel through setRGBA so the frame's alpha/premultiply
// bookkeeping stays consistent.
uint8_t* pixel = row;
for (int x = 0; x < width; ++x, pixel += 4)
buffer.setRGBA(x, y, pixel[0], pixel[1], pixel[2], pixel[3]);
}
m_decodedHeight = decodedHeight;
}
示例7: TEST
// Feeds an animated GIF to the decoder one byte at a time and verifies
// that the frame count only ever grows, that all five frames eventually
// decode, and that the animation loops forever.
TEST(GIFImageDecoderTest, parseAndDecodeByteByByte)
{
    OwnPtr<GIFImageDecoder> decoder = createDecoder();
    RefPtr<SharedBuffer> fullData = readFile("/LayoutTests/fast/images/resources/animated-gif-with-offsets.gif");
    ASSERT_TRUE(fullData.get());

    size_t frameCount = 0;
    size_t framesDecoded = 0;

    // Hand the decoder successively longer prefixes of the file.
    for (size_t prefixLength = 1; prefixLength <= fullData->size(); ++prefixLength) {
        RefPtr<SharedBuffer> prefix = SharedBuffer::create(fullData->data(), prefixLength);
        decoder->setData(prefix.get(), prefixLength == fullData->size());

        // The reported frame count must never shrink as data arrives.
        EXPECT_LE(frameCount, decoder->frameCount());
        frameCount = decoder->frameCount();

        // Count each frame the first time it becomes complete.
        ImageFrame* lastFrame = decoder->frameBufferAtIndex(frameCount - 1);
        if (lastFrame && lastFrame->status() == ImageFrame::FrameComplete && framesDecoded < frameCount)
            ++framesDecoded;
    }

    EXPECT_EQ(5u, decoder->frameCount());
    EXPECT_EQ(5u, framesDecoded);
    EXPECT_EQ(cAnimationLoopInfinite, decoder->repetitionCount());
}
示例8: prepareLazyDecodedFrames
// Fills |bitmap| with frame |index|, preferring the lazily-decoded path
// when frame metadata exists and falling back to the actual decoder.
// Returns false when the frame cannot be produced.
bool DeferredImageDecoder::createFrameAtIndex(size_t index, SkBitmap* bitmap)
{
    prepareLazyDecodedFrames();

    if (index < m_frameData.size()) {
        // ImageFrameGenerator has the latest known alpha state. Marking an
        // opaque frame as such yields a performance boost when drawing.
        *bitmap = createBitmap(index);
        const bool frameHasAlpha = m_frameGenerator->hasAlpha(index);
        m_frameData[index].m_hasAlpha = frameHasAlpha;
        bitmap->setAlphaType(frameHasAlpha ? kPremul_SkAlphaType : kOpaque_SkAlphaType);
        m_frameData[index].m_frameBytes = m_size.area() * sizeof(ImageFrame::PixelData);
        return true;
    }

    // Non-lazy fallback: decode synchronously through the real decoder.
    if (!m_actualDecoder)
        return false;

    ImageFrame* buffer = m_actualDecoder->frameBufferAtIndex(index);
    if (!buffer || buffer->status() == ImageFrame::FrameEmpty)
        return false;
    *bitmap = buffer->bitmap();
    return true;
}
示例9: TEST
// Regression test: reproduce a crash that used to happen for a specific
// file with a specific sequence of method calls.
TEST(AnimatedWebPTests, reproCrash)
{
    OwnPtr<WEBPImageDecoder> decoder = createDecoder();

    RefPtr<SharedBuffer> fullData = readFile("/LayoutTests/fast/images/resources/invalid_vp8_vp8x.webp");
    ASSERT_TRUE(fullData.get());

    // Feed only a prefix that is short enough that the bitstream error has
    // not been reached yet.
    const size_t partialSize = 32768;
    ASSERT_GT(fullData->size(), partialSize);
    RefPtr<SharedBuffer> partialData = SharedBuffer::create(fullData->data(), partialSize);
    decoder->setData(partialData.get(), false);

    EXPECT_EQ(1u, decoder->frameCount());
    ImageFrame* frame = decoder->frameBufferAtIndex(0);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FramePartial, frame->status());
    EXPECT_FALSE(decoder->failed());

    // Now supply all the data; the bitstream error must be detected, but
    // the decoder must not crash.
    decoder->setData(fullData.get(), true);
    EXPECT_EQ(1u, decoder->frameCount());
    frame = decoder->frameBufferAtIndex(0);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FramePartial, frame->status());
    EXPECT_EQ(cAnimationLoopOnce, decoder->repetitionCount());
    EXPECT_TRUE(decoder->failed());
}
示例10: DEFINE_THREAD_SAFE_STATIC_LOCAL
// Completion callback for the notification icon fetch: records UMA timing
// and size histograms, decodes the first frame of the downloaded image,
// and hands the resulting bitmap to the stored callback. Falls back to an
// empty bitmap whenever decoding is not possible.
void NotificationImageLoader::didFinishLoading(unsigned long resourceIdentifier,
double finishTime) {
// If this has been stopped it is not desirable to trigger further work,
// there is a shutdown of some sort in progress.
if (m_stopped)
return;
DEFINE_THREAD_SAFE_STATIC_LOCAL(
CustomCountHistogram, finishedTimeHistogram,
new CustomCountHistogram("Notifications.Icon.LoadFinishTime", 1,
1000 * 60 * 60 /* 1 hour max */,
50 /* buckets */));
finishedTimeHistogram.count(monotonicallyIncreasingTimeMS() - m_startTime);
if (m_data) {
DEFINE_THREAD_SAFE_STATIC_LOCAL(
CustomCountHistogram, fileSizeHistogram,
new CustomCountHistogram("Notifications.Icon.FileSize", 1,
10000000 /* ~10mb max */, 50 /* buckets */));
fileSizeHistogram.count(m_data->size());
// All data has been received at this point, so the decoder is created
// with dataComplete = true.
std::unique_ptr<ImageDecoder> decoder = ImageDecoder::create(
m_data, true /* dataComplete */, ImageDecoder::AlphaPremultiplied,
ImageDecoder::ColorSpaceApplied);
if (decoder) {
// The |ImageFrame*| is owned by the decoder.
ImageFrame* imageFrame = decoder->frameBufferAtIndex(0);
if (imageFrame) {
(*m_imageCallback)(imageFrame->bitmap());
return;
}
}
}
// No data, no decoder, or no frame: report an empty bitmap instead.
runCallbackWithEmptyBitmap();
}
示例11: TRACE_EVENT2
// Decodes frame |index| into |bitmap|, creating an ImageDecoder in
// |*decoder| if the caller did not supply one. Returns true when the
// decode is considered complete (full frame decoded, or all data
// received).
bool ImageFrameGenerator::decode(size_t index, ImageDecoder** decoder, SkBitmap* bitmap)
{
TRACE_EVENT2("blink", "ImageFrameGenerator::decode", "width", m_fullSize.width(), "height", m_fullSize.height());
ASSERT(decoder);
SharedBuffer* data = 0;
bool allDataReceived = false;
bool newDecoder = false;
m_data.data(&data, &allDataReceived);
// Try to create an ImageDecoder if we are not given one.
if (!*decoder) {
newDecoder = true;
// Prefer the injected factory (used by tests); otherwise sniff the data.
if (m_imageDecoderFactory)
*decoder = m_imageDecoderFactory->create().leakPtr();
if (!*decoder)
*decoder = ImageDecoder::create(*data, ImageSource::AlphaPremultiplied, ImageSource::GammaAndColorProfileApplied).leakPtr();
if (!*decoder)
return false;
}
if (!m_isMultiFrame && newDecoder && allDataReceived) {
// If we're using an external memory allocator that means we're decoding
// directly into the output memory and we can save one memcpy.
ASSERT(m_externalAllocator.get());
(*decoder)->setMemoryAllocator(m_externalAllocator.get());
}
(*decoder)->setData(data, allDataReceived);
ImageFrame* frame = (*decoder)->frameBufferAtIndex(index);
// For multi-frame image decoders, we need to know how many frames are
// in that image in order to release the decoder when all frames are
// decoded. frameCount() is reliable only if all data is received and set in
// decoder, particularly with GIF.
if (allDataReceived)
m_frameCount = (*decoder)->frameCount();
(*decoder)->setData(0, false); // Unref SharedBuffer from ImageDecoder.
(*decoder)->clearCacheExceptFrame(index);
(*decoder)->setMemoryAllocator(0);
if (!frame || frame->status() == ImageFrame::FrameEmpty)
return false;
// A cache object is considered complete if we can decode a complete frame.
// Or we have received all data. The image might not be fully decoded in
// the latter case.
const bool isDecodeComplete = frame->status() == ImageFrame::FrameComplete || allDataReceived;
SkBitmap fullSizeBitmap = frame->getSkBitmap();
if (!fullSizeBitmap.isNull())
{
ASSERT(fullSizeBitmap.width() == m_fullSize.width() && fullSizeBitmap.height() == m_fullSize.height());
// Track per-frame alpha so callers can optimize opaque frames.
setHasAlpha(index, !fullSizeBitmap.isOpaque());
}
*bitmap = fullSizeBitmap;
return isDecodeComplete;
}
示例12: frameIsCompleteAtIndex
// Returns true only when frame |index| has been fully decoded.
bool ImageSource::frameIsCompleteAtIndex(size_t index)
{
    if (!m_decoder)
        return false;

    ImageFrame* frame = m_decoder->frameBufferAtIndex(index);
    if (!frame)
        return false;
    return frame->status() == ImageFrame::FrameComplete;
}
示例13: initializeNewFrame
// Copies per-frame metadata (rect, duration, disposal, dependency) from
// the GIF parser's frame context into the frame buffer cache entry.
void GIFImageDecoder::initializeNewFrame(size_t index)
{
    const GIFFrameContext* context = m_reader->frameContext(index);
    ImageFrame& frame = m_frameBufferCache[index];

    // Clip the declared frame rectangle to the image bounds.
    frame.setOriginalFrameRect(intersection(context->frameRect(), IntRect(IntPoint(), size())));
    frame.setDuration(context->delayTime());
    frame.setDisposalMethod(context->disposalMethod());
    frame.setRequiredPreviousFrameIndex(findRequiredPreviousFrame(index, false));
}
示例14: frameBufferAtIndex
// Returns the frame buffer for |index|, lazily decoding it when it is not
// yet complete. Returns 0 for an out-of-range index.
ImageFrame* ICOImageDecoder::frameBufferAtIndex(size_t index)
{
    // Ensure |index| is valid.
    if (index >= frameCount())
        return 0;

    ImageFrame* const buffer = &m_frameBufferCache[index];
    if (!buffer->isComplete()) {
        // Trigger a decode of this frame.
        decode(index, false);
    }
    return buffer;
}
示例15: PlotTracking
void VisualOdometry::PlotTracking(const ImageFrame &frame0,
const ImageFrame &frame1,
const std::vector<cv::DMatch> &matches) {
cv::Mat output_img = frame0.GetImage().clone();
int thickness = 2;
for (int i = 0; i < matches.size(); ++i) {
line(output_img, frame0.keypoints()[matches[i].trainIdx].pt,
frame1.keypoints()[matches[i].queryIdx].pt, cv::Scalar(255, 0, 0),
thickness);
}
cv::imshow("tracking_result", output_img);
cv::waitKey(tracking_wait_time_);
}