This article collects typical usage examples of the C++ method PixelBuffer::get_format. If you are unsure what PixelBuffer::get_format does or how to call it, the curated examples below should help; you can also explore further usage examples of the containing class, PixelBuffer.
Five code examples of PixelBuffer::get_format are shown below, sorted by popularity by default.
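Before the individual examples, here is a minimal sketch of the pattern most of them share: query get_format() and, when the buffer is not already tf_rgba8, re-pack it through a temporary PixelBuffer (the same conversion idiom used in Example 4 below). The helper name ensure_rgba8 is made up for illustration.
// Sketch: return an rgba8 copy of a pixel buffer, converting only when needed.
PixelBuffer ensure_rgba8(const PixelBuffer &source)
{
    if (source.get_format() == tf_rgba8)
        return source;                       // already in the expected format

    // Re-pack into an rgba8 buffer of the same size (conversion idiom from Example 4).
    PixelBuffer converted(source.get_width(), source.get_height(), tf_rgba8);
    converted.set_image(source);
    return converted;
}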
Example 1: outline_provider
CollisionOutline::CollisionOutline(
    IODevice &file, const std::string &file_extension,
    int alpha_limit,
    OutlineAccuracy accuracy,
    bool get_insides)
{
    if( file_extension == "out" )
    {
        // Pre-computed outline file: load the contours directly.
        OutlineProviderFile outline_provider(file);
        *this = CollisionOutline(outline_provider.get_contours(), outline_provider.get_size(), accuracy_raw );
    }
    else
    {
        PixelBuffer pbuf = ImageProviderFactory::load(file, file_extension);
        if( pbuf.get_format() == tf_rgba8 )
        {
            // Image has an alpha channel: trace the outline at the requested accuracy.
            OutlineProviderBitmap outline_provider(pbuf, alpha_limit, get_insides);
            *this = CollisionOutline(outline_provider.get_contours(), outline_provider.get_size(), accuracy );
        }
        else
        {
            // No alpha channel: fall back to the raw accuracy.
            OutlineProviderBitmap outline_provider(pbuf, alpha_limit, get_insides);
            *this = CollisionOutline(outline_provider.get_contours(), outline_provider.get_size(), accuracy_raw );
        }
    }
    set_rotation_hotspot(origin_center);
}
Example 2:
TransferTexture::TransferTexture(GraphicContext &gc, const PixelBuffer &pbuff, PixelBufferDirection direction, BufferUsage usage)
{
    GraphicContextProvider *gc_provider = gc.get_provider();
    PixelBufferProvider *provider = gc_provider->alloc_pixel_buffer();
    *this = TransferTexture(provider);
    // Create the transfer buffer with the same contents, size and format as the source buffer.
    provider->create(pbuff.get_data(), pbuff.get_size(), direction, pbuff.get_format(), usage);
}
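As a point of reference, a call site for this constructor might look like the snippet below. The enum values data_to_gpu and usage_stream_draw are assumed names used purely for illustration; substitute whatever PixelBufferDirection and BufferUsage values your code base actually defines.
// Hypothetical usage of the constructor above; the direction and usage
// enum values are illustrative assumptions, not confirmed API names.
PixelBuffer staging(256, 256, tf_rgba8);
TransferTexture transfer(gc, staging, data_to_gpu, usage_stream_draw);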
Example 3: Pointf
OutlineProviderBitmap_Impl::OutlineProviderBitmap_Impl(
    const PixelBuffer &pbuf,
    int alpha_limit,
    bool get_insides)
:
    data(0),
    get_insides(get_insides),
    alpha_limit(alpha_limit),
    //double_precision(false),
    //consecutive_left_turns(0),
    //consecutive_right_turns(0),
    alpha_pixel(3),
    pb(pbuf),
    last_point(0,0),
    last_dir(DIR_LEFT)
{
    // Cache the dimensions first: they are needed by both branches below.
    height = pbuf.get_height();
    width = pbuf.get_width();

    if( pbuf.get_format() != tf_rgba8 )
    {
        // the image contains no alpha - add only a rectangle
        Contour contour;
        contour.get_points().push_back( Pointf(0.0f, 0.0f) );
        contour.get_points().push_back( Pointf(0.0f, float(height)) );
        contour.get_points().push_back( Pointf(float(width), float(height)) );
        contour.get_points().push_back( Pointf(float(width), 0.0f) );
        contours.push_back(contour);
        return;
    }

    // allocate a grid of unsigned chars, this represents the corners between pixels.
    // We will only use the first 4 bits of each char:
    // (1 << 0) 0x1 : the pixel to the upper left
    // (1 << 1) 0x2 : the pixel to the upper right
    // (1 << 2) 0x4 : the pixel to the lower left
    // (1 << 3) 0x8 : the pixel to the lower right
    data = new unsigned char[(height+1)*(width+1)];

    // The image part
    for(int y = 0; y <= height; y++)
    {
        for(int x = 0; x <= width; x++)
        {
            get_corner(x,y) = 0x0;
            if(is_opaque(x-1,y-1))
                get_corner(x,y) |= 0x1;
            if(is_opaque(x,y-1))
                get_corner(x,y) |= 0x2;
            if(is_opaque(x-1,y))
                get_corner(x,y) |= 0x4;
            if(is_opaque(x,y))
                get_corner(x,y) |= 0x8;
        }
    }

    find_contours();
}
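The helper is_opaque() referenced in Example 3 is not shown in this listing. Based on the members initialised above (alpha_limit, alpha_pixel set to 3, and the stored PixelBuffer pb), a plausible sketch might look like the following; this is an assumption for illustration, not the library's actual implementation.
// Sketch of a possible is_opaque(): out-of-range coordinates count as transparent,
// otherwise the pixel's alpha byte (channel index alpha_pixel) is compared against alpha_limit.
bool OutlineProviderBitmap_Impl::is_opaque(int x, int y)
{
    if (x < 0 || y < 0 || x >= width || y >= height)
        return false;

    const unsigned char *pixels = (const unsigned char *) pb.get_data();
    const unsigned char alpha = pixels[y * pb.get_pitch() + x * pb.get_bytes_per_pixel() + alpha_pixel];
    return alpha > alpha_limit;
}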
Example 4: state_tracker
void GL1TextureProvider::set_texture_image3d(
    GLuint target,
    PixelBuffer &image,
    int image_depth,
    int level)
{
    throw_if_disposed();
    GL1TextureStateTracker state_tracker(texture_type, handle);

    GLint gl_internal_format;
    GLenum gl_pixel_format;
    to_opengl_textureformat(image.get_format(), gl_internal_format, gl_pixel_format);

    // check out if the original texture needs or doesn't need an alpha channel
    bool needs_alpha = image.has_transparency();

    GLenum format;
    GLenum type;
    bool conv_needed = !to_opengl_pixelformat(image, format, type);

    // also check for the pitch (GL1 can only skip pixels, not bytes)
    if (!conv_needed)
    {
        const int bytesPerPixel = image.get_bytes_per_pixel();
        if (image.get_pitch() % bytesPerPixel != 0)
            conv_needed = true;
    }

    // no conversion needed
    if (!conv_needed)
    {
        // Upload to GL1:
        // change alignment
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
        const int bytesPerPixel = image.get_bytes_per_pixel();
#ifndef __ANDROID__
        glPixelStorei(GL_UNPACK_ROW_LENGTH, image.get_pitch() / bytesPerPixel);
        glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
        glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif
        char *data = (char *) image.get_data();
        int image_width = image.get_width();
        int image_height = image.get_height() / image_depth;

        glTexImage3D(
            target,              // target
            level,               // level
            gl_internal_format,  // internalformat
            image_width,         // width
            image_height,        // height
            image_depth,         // depth
            0,                   // border
            format,              // format
            type,                // type
            data);               // texels
    }
    // conversion needed
    else
    {
        bool big_endian = Endian::is_system_big();

        PixelBuffer buffer(
            image.get_width(), image.get_height(),
            needs_alpha ? tf_rgba8 : tf_rgb8);

        buffer.set_image(image);

        format = needs_alpha ? GL_RGBA : GL_RGB;

        // Upload to OpenGL:
        // change alignment
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
        const int bytesPerPixel = buffer.get_bytes_per_pixel();
#ifndef __ANDROID__
        glPixelStorei(GL_UNPACK_ROW_LENGTH, buffer.get_pitch() / bytesPerPixel);
        glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
        glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif
        int image_width = image.get_width();
        int image_height = image.get_height() / image_depth;

        // upload
        glTexImage3D(
            target,              // target
            level,               // level
            gl_internal_format,  // internalformat
            image_width,         // width
            image_height,        // height
            image_depth,         // depth
            0,                   // border
            format,              // format
            GL_UNSIGNED_BYTE,    // type
            buffer.get_data());  // texels
    }
}
Example 5: to_opengl_pixelformat
bool GL1TextureProvider::to_opengl_pixelformat(const PixelBuffer &pbuffer, GLenum &format, GLenum &type)
{
    return to_opengl_pixelformat(pbuffer.get_format(), format, type);
}
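Example 5 merely forwards to an overload that takes the texture format directly; that overload is not part of this listing. A minimal sketch of such a mapping, covering only the two formats that appear in these examples and assuming the format enum type is named TextureFormat, could look like this (an illustration, not the library's real implementation):
// Illustrative sketch only - maps the two formats seen in these examples to GL
// enums and reports whether the buffer can be uploaded without conversion.
static bool to_opengl_pixelformat_sketch(TextureFormat texture_format, GLenum &format, GLenum &type)
{
    switch (texture_format)
    {
    case tf_rgba8:
        format = GL_RGBA;
        type = GL_UNSIGNED_BYTE;
        return true;
    case tf_rgb8:
        format = GL_RGB;
        type = GL_UNSIGNED_BYTE;
        return true;
    default:
        return false; // caller falls back to a conversion path, as in Example 4
    }
}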