本文整理汇总了C++中MemoryDataStreamPtr类的典型用法代码示例。如果您正苦于以下问题:C++ MemoryDataStreamPtr类的具体用法?C++ MemoryDataStreamPtr怎么用?C++ MemoryDataStreamPtr使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了MemoryDataStreamPtr类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: ImageData
//---------------------------------------------------------------------
/** Decodes a version-2 PVR texture stream.
    @param stream  Source stream positioned at the start of a PVRv2 file.
    @return DecodeResult holding the raw (still PVRTC-compressed) payload
            and the ImageData describing it. */
Codec::DecodeResult PVRTCCodec::decodeV2(DataStreamPtr& stream) const
{
    PVRTCTexHeaderV2 header;
    uint32 flags = 0, formatFlags = 0;
    size_t numFaces = 1; // Assume one face until we know otherwise

    ImageData *imgData = OGRE_NEW ImageData();
    MemoryDataStreamPtr output;

    // Read the PVRTC header
    stream->read(&header, sizeof(PVRTCTexHeaderV2));

    // Get format flags
    flags = header.flags;
    // BUGFIX: pass the *address* of the value to byte-swap. The previous
    // code reinterpret_cast the value of 'flags' to a pointer, so on a
    // platform where flipEndian actually swaps, it would have written to
    // an arbitrary address.
    flipEndian(&flags, sizeof(uint32));
    formatFlags = flags & PVR_TEXTURE_FLAG_TYPE_MASK;

    uint32 bitmaskAlpha = header.bitmaskAlpha;
    flipEndian(&bitmaskAlpha, sizeof(uint32)); // same fix as above

    if (formatFlags == kPVRTextureFlagTypePVRTC_4 || formatFlags == kPVRTextureFlagTypePVRTC_2)
    {
        // Choose 2bpp/4bpp PVRTC variant, with/without alpha
        if (formatFlags == kPVRTextureFlagTypePVRTC_4)
        {
            imgData->format = bitmaskAlpha ? PF_PVRTC_RGBA4 : PF_PVRTC_RGB4;
        }
        else if (formatFlags == kPVRTextureFlagTypePVRTC_2)
        {
            imgData->format = bitmaskAlpha ? PF_PVRTC_RGBA2 : PF_PVRTC_RGB2;
        }

        // NOTE(review): width/height/numMipmaps are read without an endian
        // flip, unlike 'flags' — verify on big-endian targets.
        imgData->depth = 1;
        imgData->width = header.width;
        imgData->height = header.height;
        imgData->num_mipmaps = static_cast<ushort>(header.numMipmaps);

        // PVRTC is a compressed format
        imgData->flags |= IF_COMPRESSED;
    }

    // Calculate total size from number of mipmaps, faces and size
    imgData->size = Image::calculateSize(imgData->num_mipmaps, numFaces,
                                         imgData->width, imgData->height, imgData->depth, imgData->format);

    // Bind output buffer
    output.bind(OGRE_NEW MemoryDataStream(imgData->size));

    // Copy the remaining payload straight into the output buffer.
    // (A dead self-cast of the destination pointer was removed here.)
    stream->read(output->getPtr(), imgData->size);

    DecodeResult ret;
    ret.first = output;
    ret.second = CodecDataPtr(imgData);
    return ret;
}
示例2: memStream
//---------------------------------------------------------------------
/** Decodes an image via stb_image into an uncompressed Ogre pixel buffer.
    @param input  Stream containing the encoded image file.
    @return DecodeResult with the raw pixel stream and its ImageData.
    @throws Exception on decode failure or unsupported channel count. */
Codec::DecodeResult STBIImageCodec::decode(DataStreamPtr& input) const
{
    // Pull the whole stream into one contiguous buffer for stb_image
    // (TODO: override IO functions instead?)
    MemoryDataStream buffered(input, true);

    int w = 0, h = 0, channels = 0;
    stbi_uc* decoded = stbi_load_from_memory(buffered.getPtr(),
        static_cast<int>(buffered.size()), &w, &h, &channels, 0);

    if (!decoded)
    {
        OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR,
            "Error decoding image: " + String(stbi_failure_reason()),
            "STBIImageCodec::decode");
    }

    SharedPtr<ImageData> imgData(OGRE_NEW ImageData());
    MemoryDataStreamPtr output;

    imgData->depth = 1; // only 2D formats handled by this codec
    imgData->width = w;
    imgData->height = h;
    imgData->num_mipmaps = 0; // no mipmaps in non-DDS
    imgData->flags = 0;

    // Map the channel count (1..4) onto an Ogre pixel format
    static const PixelFormat channelFormats[4] =
        { PF_BYTE_L, PF_BYTE_LA, PF_BYTE_RGB, PF_BYTE_RGBA };
    if (channels >= 1 && channels <= 4)
    {
        imgData->format = channelFormats[channels - 1];
    }
    else
    {
        stbi_image_free(decoded);
        OGRE_EXCEPT(Exception::ERR_ITEM_NOT_FOUND,
            "Unknown or unsupported image format",
            "STBIImageCodec::decode");
    }

    size_t dstPitch = imgData->width * PixelUtil::getNumElemBytes(imgData->format);
    imgData->size = dstPitch * imgData->height;

    // Wrap the stb-allocated pixels; the final 'true' asks the stream to
    // free the buffer when it is destroyed (per MemoryDataStream ctor)
    output.bind(OGRE_NEW MemoryDataStream(decoded, imgData->size, true));

    DecodeResult ret;
    ret.first = output;
    ret.second = imgData;
    return ret;
}
示例3: encode
//---------------------------------------------------------------------
/** Encodes the given raw image data and writes the result to a file.
    @param input        Raw pixel data to encode.
    @param outFileName  Path of the file to create/overwrite.
    @param pData        Codec data describing the input image.
    @throws Exception if the output file cannot be opened. */
void STBIImageCodec::encodeToFile(MemoryDataStreamPtr& input,
    const String& outFileName, Codec::CodecDataPtr& pData) const
{
    // Encode into an in-memory stream first, then dump it to disk
    MemoryDataStreamPtr encoded = encode(input, pData).staticCast<MemoryDataStream>();

    std::ofstream out(outFileName.c_str(), std::ios::out | std::ios::binary);
    if (!out.is_open())
    {
        OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR,
            "could not open file",
            "STBIImageCodec::encodeToFile");
    }
    out.write(reinterpret_cast<char*>(encoded->getPtr()), encoded->size());
}
示例4: OGRE_EXCEPT
//.........这里部分代码省略.........
multiImage = true;
}
else
{
faces = images[0]->getNumFaces();
multiImage = false;
}
// Check wether number of faces in images exceeds number of faces
// in this texture. If so, clamp it.
if(faces > getNumFaces())
faces = getNumFaces();
if (TextureManager::getSingleton().getVerbose()) {
// Say what we're doing
StringUtil::StrStreamType str;
str << "Texture: " << mName << ": Loading " << faces << " faces"
<< "(" << PixelUtil::getFormatName(images[0]->getFormat()) << "," <<
images[0]->getWidth() << "x" << images[0]->getHeight() << "x" << images[0]->getDepth() <<
") with ";
if (!(mMipmapsHardwareGenerated && mNumMipmaps == 0))
str << mNumMipmaps;
if(mUsage & TU_AUTOMIPMAP)
{
if (mMipmapsHardwareGenerated)
str << " hardware";
str << " generated mipmaps";
}
else
{
str << " custom mipmaps";
}
if(multiImage)
str << " from multiple Images.";
else
str << " from Image.";
// Scoped
{
// Print data about first destination surface
HardwarePixelBufferSharedPtr buf = getBuffer(0, 0);
str << " Internal format is " << PixelUtil::getFormatName(buf->getFormat()) <<
"," << buf->getWidth() << "x" << buf->getHeight() << "x" << buf->getDepth() << ".";
}
LogManager::getSingleton().logMessage(
LML_NORMAL, str.str());
}
// Main loading loop
// imageMips == 0 if the image has no custom mipmaps, otherwise contains the number of custom mips
for(size_t mip = 0; mip<=imageMips; ++mip)
{
for(size_t i = 0; i < faces; ++i)
{
PixelBox src;
if(multiImage)
{
// Load from multiple images
src = images[i]->getPixelBox(0, mip);
}
else
{
// Load from faces of images[0]
src = images[0]->getPixelBox(i, mip);
}
// Sets to treated format in case is difference
src.format = mSrcFormat;
if(mGamma != 1.0f) {
// Apply gamma correction
// Do not overwrite original image but do gamma correction in temporary buffer
MemoryDataStreamPtr buf; // for scoped deletion of conversion buffer
buf.bind(OGRE_NEW MemoryDataStream(
PixelUtil::getMemorySize(
src.getWidth(), src.getHeight(), src.getDepth(), src.format)));
PixelBox corrected = PixelBox(src.getWidth(), src.getHeight(), src.getDepth(), src.format, buf->getPtr());
PixelUtil::bulkPixelConversion(src, corrected);
Image::applyGamma(static_cast<uint8*>(corrected.data), mGamma, corrected.getConsecutiveSize(),
static_cast<uchar>(PixelUtil::getNumElemBits(src.format)));
// Destination: entire texture. blitFromMemory does the scaling to
// a power of two for us when needed
getBuffer(i, mip)->blitFromMemory(corrected);
}
else
{
// Destination: entire texture. blitFromMemory does the scaling to
// a power of two for us when needed
getBuffer(i, mip)->blitFromMemory(src);
}
}
}
// Update size (the final size, not including temp space)
mSize = getNumFaces() * PixelUtil::getMemorySize(mWidth, mHeight, mDepth, mFormat);
}
示例5: FreeImage_SetOutputMessage
//---------------------------------------------------------------------
Codec::DecodeResult FreeImageCodec::decode(DataStreamPtr& input) const
{
// Set error handler
FreeImage_SetOutputMessage(FreeImageLoadErrorHandler);
// Buffer stream into memory (TODO: override IO functions instead?)
MemoryDataStream memStream(input, true);
FIMEMORY* fiMem =
FreeImage_OpenMemory(memStream.getPtr(), static_cast<DWORD>(memStream.size()));
FIBITMAP* fiBitmap = FreeImage_LoadFromMemory(
(FREE_IMAGE_FORMAT)mFreeImageType, fiMem);
if (!fiBitmap)
{
OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR,
"Error decoding image",
"FreeImageCodec::decode");
}
ImageData* imgData = OGRE_NEW ImageData();
MemoryDataStreamPtr output;
imgData->depth = 1; // only 2D formats handled by this codec
imgData->width = FreeImage_GetWidth(fiBitmap);
imgData->height = FreeImage_GetHeight(fiBitmap);
imgData->num_mipmaps = 0; // no mipmaps in non-DDS
imgData->flags = 0;
// Must derive format first, this may perform conversions
FREE_IMAGE_TYPE imageType = FreeImage_GetImageType(fiBitmap);
FREE_IMAGE_COLOR_TYPE colourType = FreeImage_GetColorType(fiBitmap);
unsigned bpp = FreeImage_GetBPP(fiBitmap);
switch(imageType)
{
case FIT_UNKNOWN:
case FIT_COMPLEX:
case FIT_UINT32:
case FIT_INT32:
case FIT_DOUBLE:
default:
OGRE_EXCEPT(Exception::ERR_ITEM_NOT_FOUND,
"Unknown or unsupported image format",
"FreeImageCodec::decode");
break;
case FIT_BITMAP:
// Standard image type
// Perform any colour conversions for greyscale
if (colourType == FIC_MINISWHITE || colourType == FIC_MINISBLACK)
{
FIBITMAP* newBitmap = FreeImage_ConvertToGreyscale(fiBitmap);
// free old bitmap and replace
FreeImage_Unload(fiBitmap);
fiBitmap = newBitmap;
// get new formats
bpp = FreeImage_GetBPP(fiBitmap);
colourType = FreeImage_GetColorType(fiBitmap);
}
// Perform any colour conversions for RGB
else if (bpp < 8 || colourType == FIC_PALETTE || colourType == FIC_CMYK)
{
FIBITMAP* newBitmap = NULL;
if (FreeImage_IsTransparent(fiBitmap))
{
// convert to 32 bit to preserve the transparency
// (the alpha byte will be 0 if pixel is transparent)
newBitmap = FreeImage_ConvertTo32Bits(fiBitmap);
}
else
{
// no transparency - only 3 bytes are needed
newBitmap = FreeImage_ConvertTo24Bits(fiBitmap);
}
// free old bitmap and replace
FreeImage_Unload(fiBitmap);
fiBitmap = newBitmap;
// get new formats
bpp = FreeImage_GetBPP(fiBitmap);
colourType = FreeImage_GetColorType(fiBitmap);
}
// by this stage, 8-bit is greyscale, 16/24/32 bit are RGB[A]
switch(bpp)
{
case 8:
imgData->format = PF_L8;
break;
case 16:
// Determine 555 or 565 from green mask
// cannot be 16-bit greyscale since that's FIT_UINT16
if(FreeImage_GetGreenMask(fiBitmap) == FI16_565_GREEN_MASK)
{
imgData->format = PF_R5G6B5;
}
//.........这里部分代码省略.........
示例6: switch
//-----------------------------------------------------------------------------
void D3D10HardwarePixelBuffer::blitFromMemory(const PixelBox &src, const Image::Box &dstBox)
{
bool isDds = false;
switch(mFormat)
{
case PF_DXT1:
case PF_DXT2:
case PF_DXT3:
case PF_DXT4:
case PF_DXT5:
isDds = true;
break;
default:
break;
}
if (isDds && (dstBox.getWidth() % 4 != 0 || dstBox.getHeight() % 4 != 0 ))
{
return;
}
// for scoped deletion of conversion buffer
MemoryDataStreamPtr buf;
PixelBox converted = src;
D3D10_BOX dstBoxDx10 = OgreImageBoxToDx10Box(dstBox);
// convert to pixelbuffer's native format if necessary
if (src.format != mFormat)
{
buf.bind(new MemoryDataStream(
PixelUtil::getMemorySize(src.getWidth(), src.getHeight(), src.getDepth(),
mFormat)));
converted = PixelBox(src.getWidth(), src.getHeight(), src.getDepth(), mFormat, buf->getPtr());
PixelUtil::bulkPixelConversion(src, converted);
}
// In d3d10 the Row Pitch is defined as: "The size of one row of the source data" and not
// the same as the OGRE row pitch - meaning that we need to multiple the OGRE row pitch
// with the size in bytes of the element to get the d3d10 row pitch.
UINT d3dRowPitch = static_cast<UINT>(converted.rowPitch) * static_cast<UINT>(PixelUtil::getNumElemBytes(mFormat));
switch(mParentTexture->getTextureType()) {
case TEX_TYPE_1D:
{
mDevice->UpdateSubresource(
mParentTexture->GetTex1D(),
0,
&dstBoxDx10,
converted.data,
0,
0 );
if (mDevice.isError())
{
String errorDescription = mDevice.getErrorDescription();
OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
"D3D10 device cannot update 1d subresource\nError Description:" + errorDescription,
"D3D10HardwarePixelBuffer::blitFromMemory");
}
}
break;
case TEX_TYPE_CUBE_MAP:
case TEX_TYPE_2D:
{
mDevice->UpdateSubresource(
mParentTexture->GetTex2D(),
static_cast<UINT>(mSubresourceIndex),
&dstBoxDx10,
converted.data,
d3dRowPitch,
mFace );
if (mDevice.isError())
{
String errorDescription = mDevice.getErrorDescription();
OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
"D3D10 device cannot update 2d subresource\nError Description:" + errorDescription,
"D3D10HardwarePixelBuffer::blitFromMemory");
}
}
break;
case TEX_TYPE_3D:
{
mDevice->UpdateSubresource(
mParentTexture->GetTex2D(),
static_cast<UINT>(mSubresourceIndex),
&dstBoxDx10,
converted.data,
d3dRowPitch,
static_cast<UINT>(converted.slicePitch)
);
if (mDevice.isError())
{
String errorDescription = mDevice.getErrorDescription();
OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
"D3D10 device cannot update 3d subresource\nError Description:" + errorDescription,
//.........这里部分代码省略.........
示例7: switch
//-----------------------------------------------------------------------------
void D3D11HardwarePixelBuffer::blitFromMemory(const PixelBox &src, const Image::Box &dstBox)
{
bool isDds = false;
switch(mFormat)
{
case PF_DXT1:
case PF_DXT2:
case PF_DXT3:
case PF_DXT4:
case PF_DXT5:
isDds = true;
break;
default:
break;
}
if (isDds && (dstBox.getWidth() % 4 != 0 || dstBox.getHeight() % 4 != 0 ))
{
return;
}
// for scoped deletion of conversion buffer
MemoryDataStreamPtr buf;
PixelBox converted = src;
D3D11_BOX dstBoxDx11 = OgreImageBoxToDx11Box(dstBox);
dstBoxDx11.front = 0;
dstBoxDx11.back = converted.getDepth();
// convert to pixelbuffer's native format if necessary
if (src.format != mFormat)
{
buf.bind(new MemoryDataStream(
PixelUtil::getMemorySize(src.getWidth(), src.getHeight(), src.getDepth(),
mFormat)));
converted = PixelBox(src.getWidth(), src.getHeight(), src.getDepth(), mFormat, buf->getPtr());
PixelUtil::bulkPixelConversion(src, converted);
}
if (mUsage & HBU_DYNAMIC)
{
size_t sizeinbytes;
if (PixelUtil::isCompressed(converted.format))
{
// D3D wants the width of one row of cells in bytes
if (converted.format == PF_DXT1)
{
// 64 bits (8 bytes) per 4x4 block
sizeinbytes = std::max<size_t>(1, converted.getWidth() / 4) * std::max<size_t>(1, converted.getHeight() / 4) * 8;
}
else
{
// 128 bits (16 bytes) per 4x4 block
sizeinbytes = std::max<size_t>(1, converted.getWidth() / 4) * std::max<size_t>(1, converted.getHeight() / 4) * 16;
}
}
else
{
sizeinbytes = converted.getHeight() * converted.getWidth() * PixelUtil::getNumElemBytes(converted.format);
}
const Ogre::PixelBox &locked = lock(dstBox, HBL_DISCARD);
memcpy(locked.data, converted.data, sizeinbytes);
unlock();
}
else
{
size_t rowWidth;
if (PixelUtil::isCompressed(converted.format))
{
// D3D wants the width of one row of cells in bytes
if (converted.format == PF_DXT1)
{
// 64 bits (8 bytes) per 4x4 block
rowWidth = (converted.rowPitch / 4) * 8;
}
else
{
// 128 bits (16 bytes) per 4x4 block
rowWidth = (converted.rowPitch / 4) * 16;
}
}
else
{
rowWidth = converted.rowPitch * PixelUtil::getNumElemBytes(converted.format);
}
switch(mParentTexture->getTextureType()) {
case TEX_TYPE_1D:
{
D3D11RenderSystem* rsys = reinterpret_cast<D3D11RenderSystem*>(Root::getSingleton().getRenderSystem());
if (rsys->_getFeatureLevel() >= D3D_FEATURE_LEVEL_10_0)
{
mDevice.GetImmediateContext()->UpdateSubresource(
mParentTexture->GetTex1D(),
//.........这里部分代码省略.........
示例8: sizeof
//---------------------------------------------------------------------
/** Decodes a PKM (ETC1/ETC2) container.
    @param stream  Source stream positioned at the PKM header.
    @param result  Out-parameter receiving the pixel data stream and its
                   ImageData description on success.
    @return true if the stream held a valid PKM file, false otherwise. */
bool ETCCodec::decodePKM(DataStreamPtr& stream, DecodeResult& result) const
{
    PKMHeader header;

    // Read the ETC header
    stream->read(&header, sizeof(PKMHeader));

    if (PKM_MAGIC != FOURCC(header.name[0], header.name[1], header.name[2], header.name[3]) ) // "PKM 10"
        return false;

    // Header fields are stored big-endian as MSB/LSB byte pairs
    uint16 width = (header.iWidthMSB << 8) | header.iWidthLSB;
    uint16 height = (header.iHeightMSB << 8) | header.iHeightLSB;
    uint16 paddedWidth = (header.iPaddedWidthMSB << 8) | header.iPaddedWidthLSB;
    uint16 paddedHeight = (header.iPaddedHeightMSB << 8) | header.iPaddedHeightLSB;
    uint16 type = (header.iTextureTypeMSB << 8) | header.iTextureTypeLSB;

    ImageData *imgData = OGRE_NEW ImageData();
    imgData->depth = 1;
    imgData->width = width;
    imgData->height = height;

    // File version 2.0 supports ETC2 in addition to ETC1
    if(header.version[0] == '2' && header.version[1] == '0')
    {
        switch (type) {
        case 0:
            imgData->format = PF_ETC1_RGB8;
            break;

        // GL_COMPRESSED_RGB8_ETC2
        case 1:
            imgData->format = PF_ETC2_RGB8;
            break;

        // GL_COMPRESSED_RGBA8_ETC2_EAC
        case 3:
            imgData->format = PF_ETC2_RGBA8;
            break;

        // GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2
        case 4:
            imgData->format = PF_ETC2_RGB8A1;
            break;

        // Default case is ETC1
        default:
            imgData->format = PF_ETC1_RGB8;
            break;
        }
    }
    else
        imgData->format = PF_ETC1_RGB8;

    // ETC has no support for mipmaps - malideveloper.com has an example
    // where the mipmap levels are loaded from different external files
    imgData->num_mipmaps = 0;

    // ETC is a compressed format
    imgData->flags |= IF_COMPRESSED;

    // Payload size: half a byte per pixel of the padded dimensions.
    // NOTE(review): this assumes 4bpp; ETC2 RGBA8/EAC blocks are 8bpp —
    // verify for the PF_ETC2_RGBA8 case.
    imgData->size = (paddedWidth * paddedHeight) >> 1;

    // Bind output buffer
    MemoryDataStreamPtr output;
    output.bind(OGRE_NEW MemoryDataStream(imgData->size));

    // Copy the compressed payload straight into the output buffer
    stream->read(output->getPtr(), imgData->size);

    // BUGFIX: the decoded output was previously assigned to a local
    // 'DecodeResult ret' that was then discarded, leaving the caller's
    // 'result' untouched. Store it in the out-parameter instead.
    // (A dead self-cast of the destination pointer was removed as well.)
    result.first = output;
    result.second = CodecDataPtr(imgData);

    return true;
}
示例9: assert
//-----------------------------------------------------------------------
void Image::scale(const PixelBox &src, const PixelBox &scaled, Filter filter)
{
assert(PixelUtil::isAccessible(src.format));
assert(PixelUtil::isAccessible(scaled.format));
#ifdef NEWSCALING
MemoryDataStreamPtr buf; // For auto-delete
PixelBox temp;
switch (filter) {
case FILTER_NEAREST:
if(src.format == scaled.format) {
// No intermediate buffer needed
temp = scaled;
}
else
{
// Allocate temporary buffer of destination size in source format
temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
buf.bind(new MemoryDataStream(temp.getConsecutiveSize()));
temp.data = buf->getPtr();
}
// super-optimized: no conversion
switch (PixelUtil::getNumElemBytes(src.format)) {
case 1: NearestResampler<1>::scale(src, temp); break;
case 2: NearestResampler<2>::scale(src, temp); break;
case 3: NearestResampler<3>::scale(src, temp); break;
case 4: NearestResampler<4>::scale(src, temp); break;
case 6: NearestResampler<6>::scale(src, temp); break;
case 8: NearestResampler<8>::scale(src, temp); break;
case 12: NearestResampler<12>::scale(src, temp); break;
case 16: NearestResampler<16>::scale(src, temp); break;
default:
// never reached
assert(false);
}
if(temp.data != scaled.data)
{
// Blit temp buffer
PixelUtil::bulkPixelConversion(temp, scaled);
}
break;
case FILTER_LINEAR:
case FILTER_BILINEAR:
switch (src.format) {
case PF_L8: case PF_A8: case PF_BYTE_LA:
case PF_R8G8B8: case PF_B8G8R8:
case PF_R8G8B8A8: case PF_B8G8R8A8:
case PF_A8B8G8R8: case PF_A8R8G8B8:
case PF_X8B8G8R8: case PF_X8R8G8B8:
if(src.format == scaled.format) {
// No intermediate buffer needed
temp = scaled;
}
else
{
// Allocate temp buffer of destination size in source format
temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
buf.bind(new MemoryDataStream(temp.getConsecutiveSize()));
temp.data = buf->getPtr();
}
// super-optimized: byte-oriented math, no conversion
switch (PixelUtil::getNumElemBytes(src.format)) {
case 1: LinearResampler_Byte<1>::scale(src, temp); break;
case 2: LinearResampler_Byte<2>::scale(src, temp); break;
case 3: LinearResampler_Byte<3>::scale(src, temp); break;
case 4: LinearResampler_Byte<4>::scale(src, temp); break;
default:
// never reached
assert(false);
}
if(temp.data != scaled.data)
{
// Blit temp buffer
PixelUtil::bulkPixelConversion(temp, scaled);
}
break;
case PF_FLOAT32_RGB:
case PF_FLOAT32_RGBA:
if (scaled.format == PF_FLOAT32_RGB || scaled.format == PF_FLOAT32_RGBA)
{
// float32 to float32, avoid unpack/repack overhead
LinearResampler_Float32::scale(src, scaled);
break;
}
// else, fall through
default:
// non-optimized: floating-point math, performs conversion but always works
LinearResampler::scale(src, scaled);
}
break;
default:
// fall back to old, slow, wildly incorrect DevIL code
#endif
#if OGRE_NO_DEVIL == 0
ILuint ImageName;
ilGenImages( 1, &ImageName );
ilBindImage( ImageName );
// Convert image from OGRE to current IL image
//.........这里部分代码省略.........
示例10: decode
/** Decodes an OpenEXR image into a 32-bit float RGB(A) pixel buffer.
    @param input  Stream containing the EXR file.
    @return DecodeResult with the float pixel stream and its ImageData.
    @throws Exception wrapping any OpenEXR error. */
Codec::DecodeResult EXRCodec::decode(DataStreamPtr& input) const
{
    ImageData * imgData = new ImageData;
    MemoryDataStreamPtr output;

    try
    {
        // Clone the input into a mutable memory stream so the EXR reader
        // can move the file pointer freely
        MemoryDataStream localCopy(input);

        // Present the buffer to OpenEXR as if it were a file
        O_IStream str(localCopy, "SomeChunk.exr");
        InputFile file(str);

        Box2i dw = file.header().dataWindow();
        int width = dw.max.x - dw.min.x + 1;
        int height = dw.max.y - dw.min.y + 1;

        // Three channels unless an alpha channel is present
        int components = file.header().channels().findChannel("A") ? 4 : 3;

        // Allocate memory: one 32-bit float per component
        output.bind(new MemoryDataStream(width * height * components * 4));
        uchar *pixels = output->getPtr();

        // Describe the interleaved float layout to OpenEXR, one slice per
        // channel at consecutive 4-byte offsets within a pixel
        static const char* const channelNames[] = { "R", "G", "B", "A" };
        FrameBuffer frameBuffer;
        for (int c = 0; c < components; ++c)
        {
            frameBuffer.insert(channelNames[c],          // name
                Slice(FLOAT,                             // type
                      ((char *) pixels) + 4 * c,         // base
                      4 * components,                    // xStride
                      4 * components * width));          // yStride
        }

        file.setFrameBuffer(frameBuffer);
        file.readPixels(dw.min.y, dw.max.y);

        imgData->format = (components == 3) ? PF_FLOAT32_RGB : PF_FLOAT32_RGBA;
        imgData->width = width;
        imgData->height = height;
        imgData->depth = 1;
        imgData->size = width * height * components * 4;
        imgData->num_mipmaps = 0;
        imgData->flags = 0;
    }
    catch (const std::exception &exc)
    {
        // imgData is not yet owned by a smart pointer; clean up manually
        delete imgData;
        throw Exception(Exception::ERR_INTERNAL_ERROR,
                        "OpenEXR Error",
                        exc.what());
    }

    DecodeResult ret;
    ret.first = output;
    ret.second = CodecDataPtr(imgData);
    return ret;
}
示例11: OGRE_EXCEPT
//-----------------------------------------------------------------------------
// blitFromMemory doing hardware trilinear scaling.
// Uploads 'src_orig' into a temporary GL texture and lets the GPU scale it
// into 'dstBox' of this buffer via blitFromTexture.
void GLESTextureBuffer::blitFromMemory(const PixelBox &src_orig, const Image::Box &dstBox)
{
    // Fall back to normal GLHardwarePixelBuffer::blitFromMemory in case:
    // - FBO is not supported
    // - either source or target is luminance, which does not appear to be
    //   supported by the hardware scaling path
    // - the source dimensions match the destination ones, in which case no
    //   scaling is needed
    if(!GL_OES_framebuffer_object ||
    PixelUtil::isLuminance(src_orig.format) ||
    PixelUtil::isLuminance(mFormat) ||
    (src_orig.getWidth() == dstBox.getWidth() &&
    src_orig.getHeight() == dstBox.getHeight() &&
    src_orig.getDepth() == dstBox.getDepth()))
    {
        GLESHardwarePixelBuffer::blitFromMemory(src_orig, dstBox);
        return;
    }
    if(!mBuffer.contains(dstBox))
        OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "Destination box out of range",
                    "GLESTextureBuffer::blitFromMemory");

    // For scoped deletion of conversion buffer
    MemoryDataStreamPtr buf;
    PixelBox src;

    // First, convert the srcbox to a OpenGL compatible pixel format
    if(GLESPixelUtil::getGLOriginFormat(src_orig.format) == 0)
    {
        // No direct GL upload format exists for the source: convert to this
        // buffer's internal format in a temporary buffer first
        buf.bind(OGRE_NEW MemoryDataStream(PixelUtil::getMemorySize(src_orig.getWidth(), src_orig.getHeight(), src_orig.getDepth(),
                                                                    mFormat)));
        src = PixelBox(src_orig.getWidth(), src_orig.getHeight(), src_orig.getDepth(), mFormat, buf->getPtr());
        PixelUtil::bulkPixelConversion(src_orig, src);
    }
    else
    {
        // No conversion needed
        src = src_orig;
    }

    // Create temporary texture to store source data
    GLuint id;
    GLenum target = GL_TEXTURE_2D;
    // NOTE(review): dimensions may be rounded up to a power of two here,
    // depending on what optionalPO2 decides for this hardware — confirm.
    GLsizei width = GLESPixelUtil::optionalPO2(src.getWidth());
    GLsizei height = GLESPixelUtil::optionalPO2(src.getHeight());
    GLenum format = GLESPixelUtil::getClosestGLInternalFormat(src.format);
    GLenum datatype = GLESPixelUtil::getGLOriginDataType(src.format);

    // Generate texture name
    glGenTextures(1, &id);
    GL_CHECK_ERROR;

    // Set texture type
    glBindTexture(target, id);
    GL_CHECK_ERROR;

    // Set automatic mipmap generation; nice for minimisation
    glTexParameteri(target, GL_GENERATE_MIPMAP, GL_TRUE );
    GL_CHECK_ERROR;

    // Allocate texture memory
    glTexImage2D(target, 0, format, width, height, 0, format, datatype, 0);
    GL_CHECK_ERROR;

    // Wrap the temporary GL texture in a pixel buffer object
    GLESTextureBuffer tex(BLANKSTRING, target, id, width, height, format, src.format,
                          0, 0, (Usage)(TU_AUTOMIPMAP|HBU_STATIC_WRITE_ONLY), false, false, 0);

    // Upload data to 0,0,0 in temporary texture
    Image::Box tempTarget(0, 0, 0, src.getWidth(), src.getHeight(), src.getDepth());
    tex.upload(src, tempTarget);

    // Blit from the temporary texture into the destination box
    blitFromTexture(&tex, tempTarget, dstBox);

    // Delete temp texture
    glDeleteTextures(1, &id);
    GL_CHECK_ERROR;
}
示例12: assert
//-----------------------------------------------------------------------
/** Scales the source PixelBox into the destination PixelBox using the
    requested filter.
    'scaled' carries both the destination dimensions and the destination
    format. Both boxes must be in an accessible (directly addressable)
    pixel format. When formats differ, resampling runs into a temporary
    buffer in the source format, followed by a bulk conversion blit into
    the destination. */
void Image::scale(const PixelBox &src, const PixelBox &scaled, Filter filter)
{
    assert(PixelUtil::isAccessible(src.format));
    assert(PixelUtil::isAccessible(scaled.format));
    MemoryDataStreamPtr buf; // For auto-delete
    PixelBox temp;
    switch (filter)
    {
    // Unknown filter values fall back to nearest-neighbour
    default:
    case FILTER_NEAREST:
        if(src.format == scaled.format)
        {
            // No intermediate buffer needed
            temp = scaled;
        }
        else
        {
            // Allocate temporary buffer of destination size in source format
            temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
            buf.bind(OGRE_NEW MemoryDataStream(temp.getConsecutiveSize()));
            temp.data = buf->getPtr();
        }
        // super-optimized: no conversion — dispatch on the pixel size so the
        // resampler can copy fixed-size elements
        switch (PixelUtil::getNumElemBytes(src.format))
        {
        case 1: NearestResampler<1>::scale(src, temp); break;
        case 2: NearestResampler<2>::scale(src, temp); break;
        case 3: NearestResampler<3>::scale(src, temp); break;
        case 4: NearestResampler<4>::scale(src, temp); break;
        case 6: NearestResampler<6>::scale(src, temp); break;
        case 8: NearestResampler<8>::scale(src, temp); break;
        case 12: NearestResampler<12>::scale(src, temp); break;
        case 16: NearestResampler<16>::scale(src, temp); break;
        default:
            // never reached
            assert(false);
        }
        if(temp.data != scaled.data)
        {
            // Blit temp buffer into the destination, converting formats
            PixelUtil::bulkPixelConversion(temp, scaled);
        }
        break;

    case FILTER_LINEAR:
    case FILTER_BILINEAR:
        switch (src.format)
        {
        // Byte-per-channel formats get the optimized byte resampler
        case PF_L8: case PF_A8: case PF_BYTE_LA:
        case PF_R8G8B8: case PF_B8G8R8:
        case PF_R8G8B8A8: case PF_B8G8R8A8:
        case PF_A8B8G8R8: case PF_A8R8G8B8:
        case PF_X8B8G8R8: case PF_X8R8G8B8:
            if(src.format == scaled.format)
            {
                // No intermediate buffer needed
                temp = scaled;
            }
            else
            {
                // Allocate temp buffer of destination size in source format
                temp = PixelBox(scaled.getWidth(), scaled.getHeight(), scaled.getDepth(), src.format);
                buf.bind(OGRE_NEW MemoryDataStream(temp.getConsecutiveSize()));
                temp.data = buf->getPtr();
            }
            // super-optimized: byte-oriented math, no conversion
            switch (PixelUtil::getNumElemBytes(src.format))
            {
            case 1: LinearResampler_Byte<1>::scale(src, temp); break;
            case 2: LinearResampler_Byte<2>::scale(src, temp); break;
            case 3: LinearResampler_Byte<3>::scale(src, temp); break;
            case 4: LinearResampler_Byte<4>::scale(src, temp); break;
            default:
                // never reached
                assert(false);
            }
            if(temp.data != scaled.data)
            {
                // Blit temp buffer into the destination, converting formats
                PixelUtil::bulkPixelConversion(temp, scaled);
            }
            break;
        case PF_FLOAT32_RGB:
        case PF_FLOAT32_RGBA:
            if (scaled.format == PF_FLOAT32_RGB || scaled.format == PF_FLOAT32_RGBA)
            {
                // float32 to float32, avoid unpack/repack overhead
                LinearResampler_Float32::scale(src, scaled);
                break;
            }
            // else, fall through to the generic resampler below
        default:
            // non-optimized: floating-point math, performs conversion but always works
            LinearResampler::scale(src, scaled);
        }
        break;
    }
}
示例13: OGRE_EXCEPT
//-----------------------------------------------------------------------------
// blitFromMemory doing hardware trilinear scaling.
// Uploads 'src_orig' into a temporary GL texture and lets the GPU scale it
// into 'dstBox' of this buffer via blitFromTexture.
void GLES2TextureBuffer::blitFromMemory(const PixelBox &src_orig, const Image::Box &dstBox)
{
    // Fall back to normal GLHardwarePixelBuffer::blitFromMemory in case:
    // - FBO is not supported
    // - either source or target is luminance, which does not appear to be
    //   supported by the hardware scaling path
    // - the source dimensions match the destination ones, in which case no
    //   scaling is needed
    // TODO: Check that extension is NOT available
    if(PixelUtil::isLuminance(src_orig.format) ||
       PixelUtil::isLuminance(mFormat) ||
       (src_orig.getWidth() == dstBox.getWidth() &&
        src_orig.getHeight() == dstBox.getHeight() &&
        src_orig.getDepth() == dstBox.getDepth()))
    {
        GLES2HardwarePixelBuffer::blitFromMemory(src_orig, dstBox);
        return;
    }
    if(!mBuffer.contains(dstBox))
        OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "Destination box out of range",
                    "GLES2TextureBuffer::blitFromMemory");

    // For scoped deletion of conversion buffer
    MemoryDataStreamPtr buf;
    PixelBox src;

    // First, convert the srcbox to a OpenGL compatible pixel format
    if(GLES2PixelUtil::getGLOriginFormat(src_orig.format) == 0)
    {
        // No direct GL upload format: convert to this buffer's internal format
        buf.bind(OGRE_NEW MemoryDataStream(PixelUtil::getMemorySize(src_orig.getWidth(), src_orig.getHeight(),
                                                                    src_orig.getDepth(), mFormat)));
        src = PixelBox(src_orig.getWidth(), src_orig.getHeight(), src_orig.getDepth(), mFormat, buf->getPtr());
        PixelUtil::bulkPixelConversion(src_orig, src);
    }
    else
    {
        // No conversion needed
        src = src_orig;
    }

    // Create temporary texture to store source data; volumes need a 3D target
    GLuint id;
    GLenum target =
#if OGRE_NO_GLES3_SUPPORT == 0
        (src.getDepth() != 1) ? GL_TEXTURE_3D :
#endif
        GL_TEXTURE_2D;

    GLsizei width = GLES2PixelUtil::optionalPO2(src.getWidth());
    GLsizei height = GLES2PixelUtil::optionalPO2(src.getHeight());
    GLenum format = GLES2PixelUtil::getClosestGLInternalFormat(src.format);
    GLenum datatype = GLES2PixelUtil::getGLOriginDataType(src.format);

    // Generate texture name
    OGRE_CHECK_GL_ERROR(glGenTextures(1, &id));

    // Set texture type
    OGRE_CHECK_GL_ERROR(glBindTexture(target, id));

#if GL_APPLE_texture_max_level && OGRE_PLATFORM != OGRE_PLATFORM_NACL
    OGRE_CHECK_GL_ERROR(glTexParameteri(target, GL_TEXTURE_MAX_LEVEL_APPLE, 1000 ));
#elif OGRE_NO_GLES3_SUPPORT == 0
    OGRE_CHECK_GL_ERROR(glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, 1000 ));
#endif

    // Allocate texture memory
#if OGRE_NO_GLES3_SUPPORT == 0
    if(target == GL_TEXTURE_3D || target == GL_TEXTURE_2D_ARRAY)
        // BUGFIX: glTexImage3D expects GL enums; previously the Ogre
        // PixelFormat value 'src.format' was passed as the internalformat and
        // a hard-coded GL_RGBA/GL_UNSIGNED_BYTE pair as the pixel format/type,
        // which does not describe the data uploaded below. Use the derived GL
        // format/datatype, consistent with the 2D path, and check for GL
        // errors like every other GL call in this function.
        OGRE_CHECK_GL_ERROR(glTexImage3D(target, 0, format, src.getWidth(), src.getHeight(), src.getDepth(), 0, format, datatype, 0));
    else
#endif
        OGRE_CHECK_GL_ERROR(glTexImage2D(target, 0, format, width, height, 0, format, datatype, 0));

    // Wrap the temporary GL texture in a pixel buffer object
    GLES2TextureBuffer tex(StringUtil::BLANK, target, id, width, height, format, src.format,
                           0, 0, (Usage)(TU_AUTOMIPMAP|HBU_STATIC_WRITE_ONLY), false, false, 0);

    // Upload data to 0,0,0 in temporary texture
    Image::Box tempTarget(0, 0, 0, src.getWidth(), src.getHeight(), src.getDepth());
    tex.upload(src, tempTarget);

    // Blit from the temporary texture into the destination box
    blitFromTexture(&tex, tempTarget, dstBox);

    // Delete temp texture
    OGRE_CHECK_GL_ERROR(glDeleteTextures(1, &id));
}