本文整理汇总了C++中QTPixelBuffer::height方法的典型用法代码示例。如果您正苦于以下问题:C++ QTPixelBuffer::height方法的具体用法?C++ QTPixelBuffer::height怎么用?C++ QTPixelBuffer::height使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类QTPixelBuffer
的用法示例。
在下文中一共展示了QTPixelBuffer::height方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: canDecompress
// Returns true if this decompression session can handle the given buffer:
// the session must exist, and the buffer's pixel format and dimensions must
// match the parameters the session was created with (a session only
// decompresses one format at one size).
bool QTDecompressionSession::canDecompress(QTPixelBuffer inBuffer)
{
    if (!m_session)
        return false;
    if (inBuffer.pixelFormatType() != m_pixelFormat)
        return false;
    return inBuffer.width() == m_width && inBuffer.height() == m_height;
}
示例2: CreateCGImageFromPixelBuffer
// Wraps a 32-bit BGRA or ARGB QTPixelBuffer in a CGImage without copying the
// pixel data: a direct-access CGDataProvider reads straight out of the pixel
// buffer, which is retained for the provider's lifetime. Returns a +1 CGImageRef
// (caller releases), or 0 on failure or when accelerated compositing is off.
static CGImageRef CreateCGImageFromPixelBuffer(QTPixelBuffer buffer)
{
#if USE(ACCELERATED_COMPOSITING)
    CGDataProviderRef provider = 0;
    CGColorSpaceRef colorSpace = 0;
    CGImageRef image = 0;
    size_t bitsPerComponent = 0;
    size_t bitsPerPixel = 0;
    CGImageAlphaInfo alphaInfo = kCGImageAlphaNone;

    if (buffer.pixelFormatIs32BGRA()) {
        bitsPerComponent = 8;
        bitsPerPixel = 32;
        // BGRA in memory == XRGB read as a little-endian 32-bit word, so the
        // ignored alpha component comes first.
        alphaInfo = (CGImageAlphaInfo)(kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little);
    } else if (buffer.pixelFormatIs32ARGB()) {
        bitsPerComponent = 8;
        bitsPerPixel = 32;
        // ARGB stored big-endian has the (ignored) alpha byte FIRST, so this
        // must be SkipFirst, not SkipLast: SkipLast would describe an RGBX
        // layout and shift every color channel by one byte.
        alphaInfo = (CGImageAlphaInfo)(kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Big);
    } else {
        // All other pixel formats are currently unsupported:
        ASSERT_NOT_REACHED();
    }

    CGDataProviderDirectAccessCallbacks callbacks = {
        &QTPixelBuffer::dataProviderGetBytePointerCallback,
        &QTPixelBuffer::dataProviderReleaseBytePointerCallback,
        &QTPixelBuffer::dataProviderGetBytesAtPositionCallback,
        &QTPixelBuffer::dataProviderReleaseInfoCallback,
    };

    // Colorspace should be device, so that Quartz does not have to do an extra render.
    colorSpace = CGColorSpaceCreateDeviceRGB();
    require(colorSpace, Bail);

    provider = CGDataProviderCreateDirectAccess(buffer.pixelBufferRef(), buffer.dataSize(), &callbacks);
    require(provider, Bail);

    // CGDataProvider does not retain the buffer, but it will release it later
    // (via dataProviderReleaseInfoCallback), so do an extra retain here:
    QTPixelBuffer::retainCallback(buffer.pixelBufferRef());

    image = CGImageCreate(buffer.width(), buffer.height(), bitsPerComponent, bitsPerPixel, buffer.bytesPerRow(), colorSpace, alphaInfo, provider, 0, false, kCGRenderingIntentDefault);

Bail:
    // Once the image is created we can release our reference to the provider
    // and the colorspace; they are retained by the image.
    if (provider)
        CGDataProviderRelease(provider);
    if (colorSpace)
        CGColorSpaceRelease(colorSpace);

    return image;
#else
    return 0;
#endif
}
示例3: retrieveCurrentImage
// Pulls the current video frame out of the movie's visual context and hands it
// to the compositor: either registered into a CAImageQueue (when the required
// DLLs are available) or converted to a CGImage set as the layer contents.
// When there is no hardware video layer, falls back to an ordinary repaint.
void MediaPlayerPrivateQuickTimeVisualContext::retrieveCurrentImage()
{
if (!m_visualContext)
return;
#if USE(ACCELERATED_COMPOSITING)
if (m_qtVideoLayer) {
// Passing 0 asks the visual context for the most recent frame.
QTPixelBuffer buffer = m_visualContext->imageForTime(0);
if (!buffer.pixelBufferRef())
return;
WKCACFLayer* layer = static_cast<WKCACFLayer*>(m_qtVideoLayer->platformLayer());
// lockBaseAddress() returns 0 on success; the matching unlockBaseAddress()
// is below, after the frame has been handed off.
if (!buffer.lockBaseAddress()) {
if (requiredDllsAvailable()) {
if (!m_imageQueue) {
// Lazily create the image queue sized to this frame and attach it as
// the layer's contents.
m_imageQueue = new WKCAImageQueue(buffer.width(), buffer.height(), 30);
m_imageQueue->setFlags(WKCAImageQueue::Fill, WKCAImageQueue::Fill);
layer->setContents(m_imageQueue->get());
}
// Debug QuickTime links against a non-Debug version of CoreFoundation, so the
// CFDictionary attached to the CVPixelBuffer cannot be directly passed on into the
// CAImageQueue without being converted to a non-Debug CFDictionary. Additionally,
// old versions of QuickTime used a non-AAS CoreFoundation, so the types are not
// interchangeable even in the release case.
RetainPtr<CFDictionaryRef> attachments(AdoptCF, QTCFDictionaryCreateCopyWithDataCallback(kCFAllocatorDefault, buffer.attachments(), &QTCFDictionaryCreateWithDataCallback));
CFTimeInterval imageTime = QTMovieVisualContext::currentHostTime();
// Let the queue reclaim slots for images it has finished displaying
// before registering a new one.
m_imageQueue->collect();
uint64_t imageId = m_imageQueue->registerPixelBuffer(buffer.baseAddress(), buffer.dataSize(), buffer.bytesPerRow(), buffer.width(), buffer.height(), buffer.pixelFormatType(), attachments.get(), 0);
if (m_imageQueue->insertImage(imageTime, WKCAImageQueue::Buffer, imageId, WKCAImageQueue::Opaque | WKCAImageQueue::Flush, &QTPixelBuffer::imageQueueReleaseCallback, buffer.pixelBufferRef())) {
// Retain the buffer one extra time so it doesn't disappear before
// CAImageQueue decides to release it (via imageQueueReleaseCallback):
QTPixelBuffer::retainCallback(buffer.pixelBufferRef());
}
} else {
// No image queue support: draw the frame through CoreGraphics instead.
CGImageRef image = CreateCGImageFromPixelBuffer(buffer);
layer->setContents(image);
CGImageRelease(image);
}
buffer.unlockBaseAddress();
layer->rootLayer()->setNeedsRender();
}
} else
#endif
m_player->repaint();
// Give the visual context a chance to do its per-frame housekeeping.
m_visualContext->task();
}
示例4: paint
// Software-paints the current video frame into the given GraphicsContext at
// rect r. Frames whose pixel format CoreGraphics cannot draw directly are
// first converted to 32-bit ARGB/BGRA via a (lazily created) decompression
// session.
void MediaPlayerPrivateQuickTimeVisualContext::paint(GraphicsContext* p, const IntRect& r)
{
MediaRenderingMode currentMode = currentRenderingMode();
if (currentMode == MediaRenderingNone)
return;
if (currentMode == MediaRenderingSoftwareRenderer && !m_visualContext)
return;
// Passing 0 asks the visual context for the most recent frame.
QTPixelBuffer buffer = m_visualContext->imageForTime(0);
if (buffer.pixelBufferRef()) {
#if USE(ACCELERATED_COMPOSITING)
if (m_qtVideoLayer) {
// We are probably being asked to render the video into a canvas, but
// there's a good chance the QTPixelBuffer is not ARGB and thus can't be
// drawn using CG. If so, fire up an ICMDecompressionSession and convert
// the current frame into something which can be rendered by CG.
if (!buffer.pixelFormatIs32ARGB() && !buffer.pixelFormatIs32BGRA()) {
// The decompression session will only decompress a specific pixelFormat
// at a specific width and height; if these differ, the session must be
// recreated with the new parameters.
if (!m_decompressionSession || !m_decompressionSession->canDecompress(buffer))
m_decompressionSession = QTDecompressionSession::create(buffer.pixelFormatType(), buffer.width(), buffer.height());
buffer = m_decompressionSession->decompress(buffer);
}
}
#endif
CGImageRef image = CreateCGImageFromPixelBuffer(buffer);
CGContextRef context = p->platformContext();
CGContextSaveGState(context);
// Flip the coordinate system: CG's origin is bottom-left, WebCore's is
// top-left, so translate to the rect and mirror vertically before drawing.
CGContextTranslateCTM(context, r.x(), r.y());
CGContextTranslateCTM(context, 0, r.height());
CGContextScaleCTM(context, 1, -1);
CGContextDrawImage(context, CGRectMake(0, 0, r.width(), r.height()), image);
CGContextRestoreGState(context);
CGImageRelease(image);
}
paintCompleted(*p, r);
}