This article collects typical usage examples of the ImageRGB class in C++. If you are wondering what the ImageRGB class does, how to use it, or what real code that uses it looks like, the curated examples here may help.
Fifteen code examples of the ImageRGB class are shown below, sorted by popularity by default.
Example 1: glGenTextures
bool CObjectView::loadTexture(const char *file, GLuint *tex){
  if (!glIsTexture(*tex)){
    // no texture object yet: create one and set its parameters
    glGenTextures(1, tex);
    glBindTexture(GL_TEXTURE_2D, *tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, /*GL_LINEAR*/GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, /*GL_LINEAR*/GL_NEAREST);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
  }
  else{
    // reuse the existing texture object
    glBindTexture(GL_TEXTURE_2D, *tex);
  }

  CProgressMeter pm(this);
  pm.init();

  ImageRGB im;
  char *err;
  float r = im.loadTexture(file, &err, &pm);
  if (r > 0){
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, im.getN(), im.getM(), 0, GL_RGB, GL_UNSIGNED_BYTE, &im.index(0, 0));
    return true;
  }
  else{
    AfxMessageBox(err, MB_OK | MB_ICONSTOP);
    return false;
  }
}
Example 2: ReadImage
// Minimal mock of VW imageio routines
void ReadImage(const std::string& file, ImageRGB<byte>& image) {
  CHECK_PRED1(fs::exists, file);
  bool jpeg = IsJpegFilename(file);

  // Get size
  point2<ptrdiff_t> size;
  if (jpeg) {
    size = jpeg_read_dimensions(file);
  } else {
    size = png_read_dimensions(file);
  }

  // Allocate image data
  image.AllocImageData(size.x, size.y);
  rgba8_view_t v = interleaved_view(image.GetWidth(),
                                    image.GetHeight(),
                                    (rgba8_pixel_t*)image.GetImageBuffer(),
                                    image.GetWidth()*sizeof(PixelRGB<byte>));

  // Load the image
  if (jpeg) {
    jpeg_read_and_convert_view(file, v);
  } else {
    png_read_and_convert_view(file, v);
  }

  // GIL uses 255=opaque but we use 0=opaque
  InvertAlpha(image);
}
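A minimal usage sketch pairing this mock with the WriteImage routine shown in Example 11 below; the file paths are illustrative only.

  ImageRGB<byte> image;
  ReadImage("input.png", image);      // alpha is flipped to the 0=opaque convention on load
  WriteImage("out/copy.png", image);  // Example 11 flips it back around the PNG write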
Example 3: Compute
void Compute(const ImageRGB<byte>& imagergb,
             const ImageHSV<byte>& imagehsv,
             const MatI& seg,
             int num_segments) {
  const int& w = imagergb.GetWidth();
  const int& h = imagergb.GetHeight();

  // Compute pixel features
  pixel_ftrs.Compute(imagergb, imagehsv);

  // Build histograms over labels for each segment
  hists.Resize(num_segments, pixel_ftrs.label_map.MaxValue()+1);
  hists.Fill(0);
  for (int r = 0; r < h; r++) {
    const int* labelrow = pixel_ftrs.label_map[r];
    const int* segrow = seg[r];
    for (int c = 0; c < w; c++) {
      hists[ segrow[c] ][ labelrow[c] ]++;
    }
  }

  // Normalize the histograms
  features.resize(num_segments);
  for (int i = 0; i < num_segments; i++) {
    features[i] = hists.GetRow(i);
    features[i] /= features[i].Sum();
  }
}
Example 4: OutputGradientViz
void OutputGradientViz(const string& filename,
                       const ImageRGB<byte>& image) const {
  const int& w = image.GetWidth();
  const int& h = image.GetHeight();

  // Draw segment boundaries
  ImageRGB<byte> canvas;
  const MatI& segmap = segmentation;
  ImageCopy(image, canvas);
  for (int r = 0; r < h; r++) {
    for (int c = 0; c < w; c++) {
      if (distxform.dists[r][c] == 0) {
        canvas[r][c] = BrightColors::Get(segmap[r][c]);
      }
    }
  }

  // Draw segment orientation
  for (int i = 0; i < num_segments; i++) {
    if (seg_sizes[i] > 40) {
      const double norm = 10.0 / sqrt(seg_dx[i]*seg_dx[i] + seg_dy[i]*seg_dy[i]);
      Vector<2> a = makeVector(seg_x[i], seg_y[i]);
      Vector<2> b = makeVector(seg_x[i]+seg_dx[i]*norm, seg_y[i]+seg_dy[i]*norm);
      DrawSpot(canvas, BrightColors::Get(i), a, 1);
      DrawLineClipped(canvas, a, b, BrightColors::Get(i));
    }
  }

  WriteImage("out/segorients.png", canvas);
}
Example 5: save_png_gdiplus
static std::pair<bool, String> save_png_gdiplus(const Path &filename, const ImageRGB &img)
{
  // gdi+ does not support URIs
  Path winname(filename);
  winname.ToWindows();
  const auto newsize = winname.Size() + 1;
  auto wcstring = std::vector<wchar_t>(newsize);
  auto convertedChars = size_t(0);
  mbstowcs_s(&convertedChars, wcstring.data(), newsize, winname.CStr(), _TRUNCATE);

  Gdiplus::Bitmap *outbm = new Gdiplus::Bitmap(INT(img.GetWidth()), INT(img.GetHeight()), PixelFormat24bppRGB);
  if (!outbm)
  {
    return std::make_pair(false, String(_("Cannot create bitmap")));
  }

  Gdiplus::BitmapData bitmapData;
  auto clip = Gdiplus::Rect(0, 0, outbm->GetWidth(), outbm->GetHeight());
  outbm->LockBits(&clip, Gdiplus::ImageLockModeWrite, PixelFormat24bppRGB, &bitmapData);
  auto *pixels = (uint8_t*)bitmapData.Scan0;
  //#pragma omp parallel for
  FOREACHPIXEL(x, y, img)
  {
    // 24bpp GDI+ pixels are stored as BGR
    size_t poffset = 3 * x + y * bitmapData.Stride;
    const auto &pix = img.At(x, y);
    pixels[poffset + 2] = pix.r;
    pixels[poffset + 1] = pix.g;
    pixels[poffset] = pix.b;
  }
  // ... (the rest of the function is truncated in this excerpt)
Example 6: filterImage
void YellowColorFilter::filterImage(ImageRGB & image) {
  // Image size.
  int width = image.width();
  int height = image.height();

  // For every pixel in the image
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      // Get the RGB values
      Rgb<unsigned char &> pixelRGB = image.at(x, y);

      // Holds the hue, saturation and value
      float hue, saturation, value;

      // Convert RGB to HSV.
      RGB2HSV(pixelRGB.red, pixelRGB.green, pixelRGB.blue, hue, saturation, value);

      // If the color is within our yellow range, make the output pixel white.
      if (hue >= 25 && hue <= 60 && saturation >= 0.60) {
        pixelRGB.red = 255;
        pixelRGB.green = 255;
        pixelRGB.blue = 255;
      }
      else {
        // Otherwise make the pixel black.
        pixelRGB.red = 0;
        pixelRGB.green = 0;
        pixelRGB.blue = 0;
      }
    }
  }
}
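The filter relies on an RGB2HSV helper that is not shown in this excerpt. Below is a minimal sketch of such a conversion, assuming hue in degrees [0, 360) and saturation/value in [0, 1], which is what the thresholds above imply; the signature and conventions of the project's actual helper may differ.

  #include <algorithm>
  #include <cmath>

  // Sketch only: convert 8-bit RGB to HSV (hue in degrees, saturation/value in 0..1).
  static void RGB2HSV(unsigned char r, unsigned char g, unsigned char b,
                      float &hue, float &saturation, float &value) {
    const float rf = r / 255.0f, gf = g / 255.0f, bf = b / 255.0f;
    const float maxc = std::max(rf, std::max(gf, bf));
    const float minc = std::min(rf, std::min(gf, bf));
    const float delta = maxc - minc;
    value = maxc;
    saturation = (maxc > 0.0f) ? (delta / maxc) : 0.0f;
    if (delta <= 0.0f)    hue = 0.0f;                                        // grey: hue undefined, use 0
    else if (maxc == rf)  hue = 60.0f * std::fmod((gf - bf) / delta, 6.0f);  // red is dominant
    else if (maxc == gf)  hue = 60.0f * ((bf - rf) / delta + 2.0f);          // green is dominant
    else                  hue = 60.0f * ((rf - gf) / delta + 4.0f);          // blue is dominant
    if (hue < 0.0f) hue += 360.0f;
  }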
Example 7: InvertAlpha
// Invert alpha channel in an RGB image
void InvertAlpha(ImageRGB<byte>& image) {
  for (int y = 0; y < image.GetHeight(); y++) {
    PixelRGB<byte>* row = image[y];
    for (int x = 0; x < image.GetWidth(); x++) {
      row[x].alpha = 255 - row[x].alpha;
    }
  }
}
Example 8:
void FrameCapturer::rgb2bgr(ImageRGB& img) {
  ImageRGB::pixel_type pix, pix_end;
  for (pix = img.begin(), pix_end = img.end(); pix != pix_end; ++pix)
    rgb2bgr(*pix);
}
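The loop dispatches to a per-pixel rgb2bgr overload that is not shown here. A minimal sketch of what such an overload typically does, assuming the pixel type exposes r and b members (the member names are an assumption, not taken from the original code):

  #include <utility>

  // Sketch only: swap the red and blue channels of one pixel in place.
  template <typename Pixel>
  void rgb2bgr(Pixel &pix) {
    std::swap(pix.r, pix.b);  // adjust member names to the actual pixel type
  }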
Example 9: Draw
void Draw(ImageRGB<byte>& canvas, const PixelRGB<byte>& color) const {
  // Canvas dimensions (unused in this excerpt).
  const int nx = canvas.GetWidth();
  const int ny = canvas.GetHeight();
  canvas.SetPenColour(color);
  canvas.DrawLine(start[0], start[1], end[0], end[1]);
}
Example 10: init
void init()
{
  std::string infile("images//distance.bmp");
  ImageRGB rgb = Imread(infile);
  width = rgb.GetWidth();
  height = rgb.GetHeight();
  pixelData = GetGLPixelData(rgb);

  glClearColor(0.0, 0.0, 0.0, 0.0);
  glMatrixMode(GL_PROJECTION);
  glLoadIdentity();
  gluOrtho2D(0.0, width, 0.0, height);
}
Example 11: WriteImage
void WriteImage(const std::string& file, const ImageRGB<byte>& image) {
  rgba8c_view_t v = interleaved_view(image.GetWidth(),
                                     image.GetHeight(),
                                     (const rgba8_pixel_t*)image.GetImageBuffer(),
                                     image.GetWidth()*sizeof(PixelRGB<byte>));
  // Here we use a hack to work around the fact that GIL uses
  // 255=opaque but we use 0=opaque
  InvertAlpha(const_cast<ImageRGB<byte>&>(image));
  if (IsJpegFilename(file)) {
    CHECK(false) << "jpeg output not supported";
    //jpeg_write_view(file, v);
  } else {
    png_write_view(file, v);
  }
  InvertAlpha(const_cast<ImageRGB<byte>&>(image));
}
Example 12: save_png_gdkpixbuf
static std::pair<bool, String> save_png_gdkpixbuf(const Path &fname, const ImageRGB &img)
{
  GdkPixbuf *pb = gdk_pixbuf_new(GDK_COLORSPACE_RGB, FALSE, 8,
                                 int(img.GetWidth()), int(img.GetHeight()));
  if (!pb)
  {
    return std::make_pair(false, String(_("Cannot create temporary buffer.")));
  }

  int w = gdk_pixbuf_get_width(pb);
  int h = gdk_pixbuf_get_height(pb);
  int rs = gdk_pixbuf_get_rowstride(pb);
  guchar *ppix = gdk_pixbuf_get_pixels(pb);
  for (int y = 0; y < h; y++)
    for (int x = 0; x < w; x++)
    {
      const auto px = img.At(x, y);
      int o2 = x + x + x + y * rs;
      ppix[o2] = px.r;
      ppix[o2 + 1] = px.g;
      ppix[o2 + 2] = px.b;
    }

  GError *err = NULL;
  gchar *utfname;
  gsize i;
  utfname = g_locale_to_utf8(fname.CStr(), -1, NULL, &i, NULL);
  gchar *filename;
  filename = g_filename_from_utf8(utfname, -1, NULL, &i, NULL);
  g_free(utfname);
  gchar *tinfo = g_locale_to_utf8("tEXt::Source", -1, NULL, &i, NULL);
  gchar *tinfo2 = g_locale_to_utf8("Saved by libcrn.", -1, NULL, &i, NULL);
  bool ok = gdk_pixbuf_save(pb, filename, "png", &err, tinfo, tinfo2, NULL);
  g_free(filename);
  g_free(tinfo);
  g_free(tinfo2);
  g_object_unref(pb);

  String out(U"");
  if (!ok)
  {
    out = String(_("Cannot save file. ")) + err->message;
    g_error_free(err);
  }
  return std::make_pair(ok, out);
}
Example 13: CaptureFrameBuffer
void GlutWindow::CaptureFrameBuffer(ImageRGB<byte>& out) const {
  CHECK(InGlutThread())
      << "Frame buffer can only be captured inside the GLUT thread.";
  Flush();
  ResizeImage(out, size_);
  glReadPixels(0, 0, size_.x, size_.y,
               GL_BGRA, GL_UNSIGNED_BYTE, out.GetImageBuffer());
  ResetAlpha(out);    // the GL convention for alpha is different to ours
  FlipVertical(out);  // the GL convention for top and bottom is different to ours
}
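ResetAlpha and FlipVertical are not shown in these excerpts. As a rough illustration, FlipVertical could look like the sketch below, assuming the row-pointer access used in Example 7 (image[y] yielding a PixelRGB<byte>* row); the project's own implementation may differ.

  #include <algorithm>

  // Sketch only: mirror the image top-to-bottom to undo OpenGL's bottom-left origin.
  void FlipVertical(ImageRGB<byte>& image) {
    const int w = image.GetWidth();
    const int h = image.GetHeight();
    for (int y = 0; y < h / 2; y++) {
      PixelRGB<byte>* top = image[y];
      PixelRGB<byte>* bottom = image[h - 1 - y];
      std::swap_ranges(top, top + w, bottom);  // swap whole rows
    }
  }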
Example 14: convertToGrayscale
ImageGray GrayscaleImage::convertToGrayscale(ImageRGB & image) {
  // Image size.
  int width = image.width();
  int height = image.height();

  // The destination image.
  ImageGray grayImage(width, height);

  // For every pixel in the image.
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      // Get the pixel at position x,y.
      Rgb<unsigned char &> pixelRGB = image.at(x, y);
      unsigned char& pixelGray = grayImage.at(x, y);

      // Convert RGB to grayscale using the BT.601 luma weights.
      pixelGray = pixelRGB.red * 0.299 + pixelRGB.green * 0.587 + pixelRGB.blue * 0.114;
    }
  }

  // Return the gray image.
  return grayImage;
}
Example 15: floor
std::unique_ptr<ImageRGB> imageTransform::convert(const ImageRGB& oldImage) {
  std::unique_ptr<ImageRGB> returnImage = std::make_unique<ImageRGB>(460, 110);

  matrix m;
  float *inverse = m.getInverseMatrix(theMatrix);
  float a0 = inverse[0];
  float a1 = inverse[1];
  float a2 = inverse[2];
  float b0 = inverse[3];
  float b1 = inverse[4];
  float b2 = inverse[5];
  float c0 = inverse[6];
  float c1 = inverse[7];
  float c2 = inverse[8];

#define PT_IN_IMAGE(x,y) (x >= 0 && x < oldImage.width() && y >= 0 && y < oldImage.height())

  for (int h = 0; h < returnImage->height(); ++h) {
    for (int w = 0; w < returnImage->width(); ++w) {
      // Map the destination pixel (w, h) back into the source image.
      float x = (a0*w) + (a1*h) + a2;
      float y = (b0*w) + (b1*h) + b2;
      float w1 = (c0*w) + (c1*h) + c2;
      x /= w1;
      y /= w1;

      if (PT_IN_IMAGE((int)x, (int)y)) { // wh-to-xy fix - lars
        // Bilinear interpolation between the four surrounding source pixels.
        float x0 = floor(x);
        float x1 = ceil(x);
        float y0 = floor(y);
        float y1 = ceil(y);
        float deltaX = (x - x0);
        float deltaY = (y - y0);

        int ix0 = (int)x0, iy0 = (int)y0;
        int ix1 = (int)x1, iy1 = (int)y1;
        // Clamp the upper neighbours so they stay inside the source image.
        if (ix1 >= oldImage.width()) ix1 = oldImage.width() - 1;
        if (iy1 >= oldImage.height()) iy1 = oldImage.height() - 1;

        float p = oldImage.at(ix0, iy0).red + (oldImage.at(ix1, iy0).red - oldImage.at(ix0, iy0).red) * deltaX;
        float q = oldImage.at(ix0, iy1).red + (oldImage.at(ix1, iy1).red - oldImage.at(ix0, iy1).red) * deltaX;
        returnImage->at(w, h).red = (int)(p + ((q - p)*deltaY));

        p = oldImage.at(ix0, iy0).green + (oldImage.at(ix1, iy0).green - oldImage.at(ix0, iy0).green) * deltaX;
        q = oldImage.at(ix0, iy1).green + (oldImage.at(ix1, iy1).green - oldImage.at(ix0, iy1).green) * deltaX;
        returnImage->at(w, h).green = (int)(p + ((q - p)*deltaY));

        p = oldImage.at(ix0, iy0).blue + (oldImage.at(ix1, iy0).blue - oldImage.at(ix0, iy0).blue) * deltaX;
        q = oldImage.at(ix0, iy1).blue + (oldImage.at(ix1, iy1).blue - oldImage.at(ix0, iy1).blue) * deltaX;
        returnImage->at(w, h).blue = (int)(p + ((q - p)*deltaY));
      }
    }
  }
  return returnImage;
}
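The per-channel arithmetic in Example 15 is plain bilinear interpolation. The same computation for a single channel, written as a stand-alone helper (a hypothetical function, not part of the original class):

  // Bilinear interpolation of one channel at a fractional source position.
  // c00, c10, c01, c11 are the channel values at (x0,y0), (x1,y0), (x0,y1), (x1,y1).
  static float bilerp(float c00, float c10, float c01, float c11,
                      float deltaX, float deltaY) {
    const float p = c00 + (c10 - c00) * deltaX;  // interpolate along x at y0
    const float q = c01 + (c11 - c01) * deltaX;  // interpolate along x at y1
    return p + (q - p) * deltaY;                 // interpolate along y
  }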