This article collects typical usage examples of the C++ ImageBuf::getpixel method. If you are wondering exactly what ImageBuf::getpixel does, how to call it, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples of the ImageBuf class that the method belongs to.
The following shows 10 code examples of the ImageBuf::getpixel method, sorted by popularity by default.
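Before the examples, here is a minimal, self-contained sketch of the basic call pattern. The filename is hypothetical; ImageBuf::getpixel fills a caller-supplied float array with the pixel value at (x, y), converting from whatever the underlying pixel format is, up to the given maximum number of channels.

#include <OpenImageIO/imagebuf.h>
#include <iostream>

using namespace OIIO;

int main ()
{
    ImageBuf buf ("input.exr");          // hypothetical input file
    if (! buf.read ()) {                 // force the pixels to be read
        std::cerr << buf.geterror() << "\n";
        return 1;
    }
    float pixel[4] = { 0, 0, 0, 0 };
    // Fetch the pixel at (x=10, y=20) as float, reading at most 4 channels.
    buf.getpixel (10, 20, pixel, 4);
    std::cout << "R=" << pixel[0] << " G=" << pixel[1]
              << " B=" << pixel[2] << " A=" << pixel[3] << "\n";
    return 0;
}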
Example 1: ImageBuf_add
// Tests ImageBufAlgo::add
void ImageBuf_add ()
{
    const int WIDTH = 8;
    const int HEIGHT = 8;
    const int CHANNELS = 4;
    ImageSpec spec (WIDTH, HEIGHT, CHANNELS, TypeDesc::FLOAT);
    spec.alpha_channel = 3;
    // Create buffers
    ImageBuf A ("A", spec);
    const float Aval[CHANNELS] = { 0.1, 0.2, 0.3, 0.4 };
    ImageBufAlgo::fill (A, Aval);
    ImageBuf B ("B", spec);
    const float Bval[CHANNELS] = { 0.01, 0.02, 0.03, 0.04 };
    ImageBufAlgo::fill (B, Bval);
    ImageBuf C ("C", spec);
    ImageBufAlgo::add (C, A, B);
    for (int j = 0; j < HEIGHT; ++j) {
        for (int i = 0; i < WIDTH; ++i) {
            float pixel[CHANNELS];
            C.getpixel (i, j, pixel);
            for (int c = 0; c < CHANNELS; ++c)
                OIIO_CHECK_EQUAL (pixel[c], Aval[c]+Bval[c]);
        }
    }
}
Example 2: interppixel_NDC_clamped
static void
interppixel_NDC_clamped (const ImageBuf &buf, float x, float y, float *pixel,
                         bool envlatlmode)
{
    int fx = buf.spec().full_x;
    int fy = buf.spec().full_y;
    int fw = buf.spec().full_width;
    int fh = buf.spec().full_height;
    x = static_cast<float>(fx) + x * static_cast<float>(fw);
    y = static_cast<float>(fy) + y * static_cast<float>(fh);
    const int maxchannels = 64; // Reasonable guess
    float p[4][maxchannels];
    DASSERT (buf.spec().nchannels <= maxchannels &&
             "You need to increase maxchannels");
    int n = std::min (buf.spec().nchannels, maxchannels);
    x -= 0.5f;
    y -= 0.5f;
    int xtexel, ytexel;
    float xfrac, yfrac;
    xfrac = floorfrac (x, &xtexel);
    yfrac = floorfrac (y, &ytexel);
    // Clamp
    int xnext = Imath::clamp (xtexel+1, buf.xmin(), buf.xmax());
    int ynext = Imath::clamp (ytexel+1, buf.ymin(), buf.ymax());
    xnext = Imath::clamp (xnext, buf.xmin(), buf.xmax());
    ynext = Imath::clamp (ynext, buf.ymin(), buf.ymax());
    // Get the four texels
    buf.getpixel (xtexel, ytexel, p[0], n);
    buf.getpixel (xnext, ytexel, p[1], n);
    buf.getpixel (xtexel, ynext, p[2], n);
    buf.getpixel (xnext, ynext, p[3], n);
    if (envlatlmode) {
        // For latlong environment maps, in order to conserve energy, we
        // must weight the pixels by sin(t*PI) because pixels closer to
        // the pole are actually less area on the sphere. Doing this
        // wrong will tend to over-represent the high latitudes in
        // low-res MIP levels. We fold the area weighting into our
        // linear interpolation by adjusting yfrac.
        float w0 = (1.0f - yfrac) * sinf ((float)M_PI * (ytexel+0.5f)/(float)fh);
        float w1 = yfrac * sinf ((float)M_PI * (ynext+0.5f)/(float)fh);
        yfrac = w0 / (w0 + w1);
    }
    // Bilinearly interpolate
    bilerp (p[0], p[1], p[2], p[3], xfrac, yfrac, n, pixel);
}
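For context, this helper samples in NDC space, i.e. x and y in [0,1] relative to the full (display) window. Below is a hedged sketch of how a caller might drive it when resampling; the function name resize_by_NDC_sampling is hypothetical and assumes dstbuf has already been allocated with its own ImageSpec, while interppixel_NDC_clamped is the routine above.

// Hypothetical caller: point-sample srcbuf at the center of each
// destination pixel, with both coordinates expressed in NDC [0,1].
static void
resize_by_NDC_sampling (ImageBuf &dstbuf, const ImageBuf &srcbuf,
                        bool envlatlmode)
{
    const ImageSpec &spec (dstbuf.spec());
    float pel[64];                        // matches maxchannels above
    for (int y = 0; y < spec.height; ++y) {
        for (int x = 0; x < spec.width; ++x) {
            float s = (x + 0.5f) / float(spec.width);
            float t = (y + 0.5f) / float(spec.height);
            interppixel_NDC_clamped (srcbuf, s, t, pel, envlatlmode);
            dstbuf.setpixel (x, y, pel);
        }
    }
}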
Example 3: test_paste
void test_paste ()
{
    std::cout << "test paste\n";
    // Create the source image, make it a gradient
    ImageSpec Aspec (4, 4, 3, TypeDesc::FLOAT);
    ImageBuf A (Aspec);
    for (ImageBuf::Iterator<float> it (A); !it.done(); ++it) {
        it[0] = float(it.x()) / float(Aspec.width-1);
        it[1] = float(it.y()) / float(Aspec.height-1);
        it[2] = 0.1f;
    }
    // Create destination image -- fill it with a solid gray
    ImageSpec Bspec (8, 8, 3, TypeDesc::FLOAT);
    ImageBuf B (Bspec);
    float gray[3] = { .1, .1, .1 };
    ImageBufAlgo::fill (B, gray);
    // Paste a few pixels from A into B -- include offsets
    ImageBufAlgo::paste (B, 2, 2, 0, 1 /* chan offset */,
                         A, ROI(1, 4, 1, 4));
    // Spot check
    float a[3], b[3];
    B.getpixel (1, 1, 0, b);
    OIIO_CHECK_EQUAL (b[0], gray[0]);
    OIIO_CHECK_EQUAL (b[1], gray[1]);
    OIIO_CHECK_EQUAL (b[2], gray[2]);
    B.getpixel (2, 2, 0, b);
    A.getpixel (1, 1, 0, a);
    OIIO_CHECK_EQUAL (b[0], gray[0]);
    OIIO_CHECK_EQUAL (b[1], a[0]);
    OIIO_CHECK_EQUAL (b[2], a[1]);
    B.getpixel (3, 4, 0, b);
    A.getpixel (2, 3, 0, a);
    OIIO_CHECK_EQUAL (b[0], gray[0]);
    OIIO_CHECK_EQUAL (b[1], a[0]);
    OIIO_CHECK_EQUAL (b[2], a[1]);
}
Example 4: fix_latl_edges
static void
fix_latl_edges (ImageBuf &buf)
{
    ASSERT (envlatlmode && "only call fix_latl_edges for latlong maps");
    int n = buf.nchannels();
    float *left = ALLOCA (float, n);
    float *right = ALLOCA (float, n);
    // Make the whole first and last row be solid, since they are exactly
    // on the pole
    float wscale = 1.0f / (buf.spec().width);
    for (int j = 0; j <= 1; ++j) {
        int y = (j==0) ? buf.ybegin() : buf.yend()-1;
        // use left for the sum, right for each new pixel
        for (int c = 0; c < n; ++c)
            left[c] = 0.0f;
        for (int x = buf.xbegin(); x < buf.xend(); ++x) {
            buf.getpixel (x, y, right);
            for (int c = 0; c < n; ++c)
                left[c] += right[c];
        }
        for (int c = 0; c < n; ++c)
            left[c] *= wscale;
        for (int x = buf.xbegin(); x < buf.xend(); ++x)
            buf.setpixel (x, y, left);
    }
    // Make the left and right match, since they are both right on the
    // prime meridian.
    for (int y = buf.ybegin(); y < buf.yend(); ++y) {
        buf.getpixel (buf.xbegin(), y, left);
        buf.getpixel (buf.xend()-1, y, right);
        for (int c = 0; c < n; ++c)
            left[c] = 0.5f * left[c] + 0.5f * right[c];
        buf.setpixel (buf.xbegin(), y, left);
        buf.setpixel (buf.xend()-1, y, left);
    }
}
Example 5: check_transfer
void
check_transfer (const ImageBuf &in, const float *table, int size)
{
    // Skip the first pixel, which holds an invalid negative luminance
    // value; it exists only to test that the forward transfer doesn't
    // produce NaNs.
    float *pixel = ALLOCA (float, CHANNELS);
    for (int y = 1; y < STEPS; y++) {
        in.getpixel (0, y, pixel);
        for (int c = 0; c < CHANNELS; ++c) {
            if (c == in.spec().alpha_channel ||
                c == in.spec().z_channel)
                OIIO_CHECK_EQUAL (pixel[c], ALPHA);
            else
                OIIO_CHECK_ASSERT (is_equal (pixel[c], table[y]));
        }
    }
}
Example 6: ImageBuf_zero_fill
// Test ImageBuf::zero and ImageBuf::fill
void ImageBuf_zero_fill ()
{
    const int WIDTH = 8;
    const int HEIGHT = 6;
    const int CHANNELS = 4;
    ImageSpec spec (WIDTH, HEIGHT, CHANNELS, TypeDesc::FLOAT);
    spec.alpha_channel = 3;
    // Create a buffer -- pixels should be undefined
    ImageBuf A ("A", spec);
    // Set a pixel to an odd value, make sure it takes
    const float arbitrary1[CHANNELS] = { 0.2, 0.3, 0.4, 0.5 };
    A.setpixel (1, 1, arbitrary1);
    float pixel[CHANNELS]; // test pixel
    A.getpixel (1, 1, pixel);
    for (int c = 0; c < CHANNELS; ++c)
        OIIO_CHECK_EQUAL (pixel[c], arbitrary1[c]);
    // Zero out and test that it worked
    ImageBufAlgo::zero (A);
    for (int j = 0; j < HEIGHT; ++j) {
        for (int i = 0; i < WIDTH; ++i) {
            float pixel[CHANNELS];
            A.getpixel (i, j, pixel);
            for (int c = 0; c < CHANNELS; ++c)
                OIIO_CHECK_EQUAL (pixel[c], 0.0f);
        }
    }
    // Test fill of whole image
    const float arbitrary2[CHANNELS] = { 0.6, 0.7, 0.3, 0.9 };
    ImageBufAlgo::fill (A, arbitrary2);
    for (int j = 0; j < HEIGHT; ++j) {
        for (int i = 0; i < WIDTH; ++i) {
            float pixel[CHANNELS];
            A.getpixel (i, j, pixel);
            for (int c = 0; c < CHANNELS; ++c)
                OIIO_CHECK_EQUAL (pixel[c], arbitrary2[c]);
        }
    }
    // Test fill of partial image
    const float arbitrary3[CHANNELS] = { 0.42, 0.43, 0.44, 0.45 };
    {
        const int xbegin = 3, xend = 5, ybegin = 0, yend = 4;
        ImageBufAlgo::fill (A, arbitrary3, xbegin, xend, ybegin, yend);
        for (int j = 0; j < HEIGHT; ++j) {
            for (int i = 0; i < WIDTH; ++i) {
                float pixel[CHANNELS];
                A.getpixel (i, j, pixel);
                if (j >= ybegin && j < yend && i >= xbegin && i < xend) {
                    for (int c = 0; c < CHANNELS; ++c)
                        OIIO_CHECK_EQUAL (pixel[c], arbitrary3[c]);
                } else {
                    for (int c = 0; c < CHANNELS; ++c)
                        OIIO_CHECK_EQUAL (pixel[c], arbitrary2[c]);
                }
            }
        }
    }
}
Example 7: setNbChannels
bool
setNbChannels (ImageBuf &dst, const ImageBuf &src, int numChannels)
{
    // Not intended to create 0-channel images.
    if (numChannels <= 0)
        return false;
    // If we don't have a single source channel,
    // it's hard to know how big to make the additional channels
    if (src.spec().nchannels == 0)
        return false;
    if (numChannels == src.spec().nchannels) {
        dst = src;
        return true;
    }
    // Update the ImageSpec
    // (should this be moved to a helper function in imagespec.h?)
    ImageSpec dst_spec = src.spec();
    dst_spec.nchannels = numChannels;
    if (numChannels < src.spec().nchannels) {
        // Reduce the number of formats, and names, if needed
        if (static_cast<int>(dst_spec.channelformats.size()) == src.spec().nchannels)
            dst_spec.channelformats.resize (numChannels);
        if (static_cast<int>(dst_spec.channelnames.size()) == src.spec().nchannels)
            dst_spec.channelnames.resize (numChannels);
        if (dst_spec.alpha_channel < numChannels-1) {
            dst_spec.alpha_channel = -1;
        }
        if (dst_spec.z_channel < numChannels-1) {
            dst_spec.z_channel = -1;
        }
    } else {
        // Increase the number of formats, and names, if needed
        if (static_cast<int>(dst_spec.channelformats.size()) == src.spec().nchannels) {
            for (int c = dst_spec.channelnames.size(); c < numChannels; ++c) {
                dst_spec.channelformats.push_back (dst_spec.format);
            }
        }
        if (static_cast<int>(dst_spec.channelnames.size()) == src.spec().nchannels) {
            for (int c = dst_spec.channelnames.size(); c < numChannels; ++c) {
                dst_spec.channelnames.push_back (Strutil::format ("channel%d", c));
            }
        }
    }
    // Update the image (realloc with the new spec)
    dst.alloc (dst_spec);
    std::vector<float> pixel (numChannels, 0.0f);
    // Walk through the data window, i.e. the crop window in a small image
    // or the overscanned area in a large image.
    for (int k = dst_spec.z; k < dst_spec.z+dst_spec.depth; k++) {
        for (int j = dst_spec.y; j < dst_spec.y+dst_spec.height; j++) {
            for (int i = dst_spec.x; i < dst_spec.x+dst_spec.width; i++) {
                src.getpixel (i, j, k, &pixel[0], numChannels);
                dst.setpixel (i, j, k, &pixel[0], numChannels);
            }
        }
    }
    return true;
}
Example 8: from_IplImage
OIIO_NAMESPACE_BEGIN

bool
ImageBufAlgo::from_IplImage (ImageBuf &dst, const IplImage *ipl,
                             TypeDesc convert)
{
    if (! ipl) {
        DASSERT (0 && "ImageBufAlgo::fromIplImage called with NULL ipl");
        dst.error ("Passed NULL source IplImage");
        return false;
    }
#ifdef USE_OPENCV
    TypeDesc srcformat;
    switch (ipl->depth) {
    case int(IPL_DEPTH_8U) :
        srcformat = TypeDesc::UINT8; break;
    case int(IPL_DEPTH_8S) :
        srcformat = TypeDesc::INT8; break;
    case int(IPL_DEPTH_16U) :
        srcformat = TypeDesc::UINT16; break;
    case int(IPL_DEPTH_16S) :
        srcformat = TypeDesc::INT16; break;
    case int(IPL_DEPTH_32F) :
        srcformat = TypeDesc::FLOAT; break;
    case int(IPL_DEPTH_64F) :
        srcformat = TypeDesc::DOUBLE; break;
    default:
        DASSERT (0 && "unknown IplImage type");
        dst.error ("Unsupported IplImage depth %d", (int)ipl->depth);
        return false;
    }
    TypeDesc dstformat = (convert != TypeDesc::UNKNOWN) ? convert : srcformat;
    ImageSpec spec (ipl->width, ipl->height, ipl->nChannels, dstformat);
    // N.B. The OpenCV headers say that ipl->alphaChannel,
    // ipl->colorModel, and ipl->channelSeq are ignored by OpenCV.
    if (ipl->dataOrder != IPL_DATA_ORDER_PIXEL) {
        // We don't handle separate color channels, and OpenCV doesn't either
        dst.error ("Unsupported IplImage data order %d", (int)ipl->dataOrder);
        return false;
    }
    dst.reset (dst.name(), spec);
    size_t pixelsize = srcformat.size()*spec.nchannels;
    // Account for the origin in the line step size, to end up with the
    // standard OIIO origin-at-upper-left:
    size_t linestep = ipl->origin ? -ipl->widthStep : ipl->widthStep;
    // Block copy and convert
    convert_image (spec.nchannels, spec.width, spec.height, 1,
                   ipl->imageData, srcformat,
                   pixelsize, linestep, 0,
                   dst.pixeladdr(0,0), dstformat,
                   spec.pixel_bytes(), spec.scanline_bytes(), 0);
    // FIXME - honor dataOrder. I'm not sure if it is ever used by
    // OpenCV. Fix when it becomes a problem.
    // OpenCV uses BGR ordering
    // FIXME: what do they do with alpha?
    if (spec.nchannels >= 3) {
        float pixel[4];
        for (int y = 0; y < spec.height; ++y) {
            for (int x = 0; x < spec.width; ++x) {
                dst.getpixel (x, y, pixel, 4);
                float tmp = pixel[0]; pixel[0] = pixel[2]; pixel[2] = tmp;
                dst.setpixel (x, y, pixel, 4);
            }
        }
    }
    // FIXME -- the copy and channel swap should happen all as one loop,
    // probably templated by type.
    return true;
#else
    dst.error ("fromIplImage not supported -- no OpenCV support at compile time");
    return false;
#endif
}
Example 9: colorconvert
bool
ImageBufAlgo::colorconvert (ImageBuf &dst, const ImageBuf &src,
                            const ColorProcessor* processor,
                            bool unpremult)
{
    // If the processor is NULL, return false (error)
    if (!processor)
        return false;
    ImageSpec dstspec = dst.spec();
    std::vector<float> scanline (dstspec.width*4, 0.0f);
    // Only process up to, and including, the first 4 channels.
    // This does let us process images with fewer than 4 channels, which is the intent.
    // FIXME: Instead of loading the first 4 channels, obey dstspec.alpha_channel index
    // (but first validate that the index is set properly for normal formats)
    int channelsToCopy = std::min (4, dstspec.nchannels);
    // Walk through all data in our buffer. (i.e., crop or overscan)
    // FIXME: What about the display window? Should this actually promote
    // the datawindow to be union of data + display? This is useful if
    // the color of black moves. (In which case non-zero sections should
    // now be promoted). Consider the lin->log of a roto element, where
    // black now moves to non-black.
    //
    // FIXME: Use the ImageBuf::ConstIterator<T,T> s (src); s.isValid()
    // idiom for traversal instead, to allow for more efficient tile access
    // iteration order
    float *dstPtr = NULL;
    const float fltmin = std::numeric_limits<float>::min();
    for (int k = dstspec.z; k < dstspec.z+dstspec.depth; k++) {
        for (int j = dstspec.y; j < dstspec.y+dstspec.height; j++) {
            // Load the scanline
            dstPtr = &scanline[0];
            for (int i = dstspec.x; i < dstspec.x+dstspec.width; i++) {
                src.getpixel (i, j, dstPtr, channelsToCopy);
                dstPtr += 4;
            }
            // Optionally unpremult
            if ((channelsToCopy >= 4) && unpremult) {
                float alpha = 0.0;
                for (int i = 0; i < dstspec.width; ++i) {
                    alpha = scanline[4*i+3];
                    if (alpha > fltmin) {
                        scanline[4*i+0] /= alpha;
                        scanline[4*i+1] /= alpha;
                        scanline[4*i+2] /= alpha;
                    }
                }
            }
            // Apply the color transformation in place.
            // This is always an RGBA float image, due to the conversion above.
            for (int i = 0; i < dstspec.width; ++i) {
                scanline[4*i+0] = (*processor->t2)((*processor->t1)(scanline[4*i+0]));
                scanline[4*i+1] = (*processor->t2)((*processor->t1)(scanline[4*i+1]));
                scanline[4*i+2] = (*processor->t2)((*processor->t1)(scanline[4*i+2]));
            }
            // Optionally premult back
            if ((channelsToCopy >= 4) && unpremult) {
                float alpha = 0.0;
                for (int i = 0; i < dstspec.width; ++i) {
                    alpha = scanline[4*i+3];
                    if (alpha > fltmin) {
                        scanline[4*i+0] *= alpha;
                        scanline[4*i+1] *= alpha;
                        scanline[4*i+2] *= alpha;
                    }
                }
            }
            // Store the scanline
            dstPtr = &scanline[0];
            for (int i = dstspec.x; i < dstspec.x+dstspec.width; i++) {
                dst.setpixel (i, j, dstPtr, channelsToCopy);
                dstPtr += 4;
            }
        }
    }
    return true;
}
Example 10: deepen
bool
ImageBufAlgo::deepen (ImageBuf &dst, const ImageBuf &src, float zvalue,
                      ROI roi, int nthreads)
{
    if (src.deep()) {
        // For some reason, we were asked to deepen an already-deep image.
        // So just copy it.
        return dst.copy (src);
        // FIXME: once paste works for deep files, this should really be
        //     return paste (dst, roi.xbegin, roi.ybegin, roi.zbegin, roi.chbegin,
        //                   src, roi, nthreads);
    }
    // Construct an ideal spec for dst, which is like src but deep.
    const ImageSpec &srcspec (src.spec());
    int nc = srcspec.nchannels;
    int zback_channel = -1;
    ImageSpec force_spec = srcspec;
    force_spec.deep = true;
    force_spec.set_format (TypeDesc::FLOAT);
    force_spec.channelformats.clear();
    for (int c = 0; c < nc; ++c) {
        if (force_spec.channelnames[c] == "Z")
            force_spec.z_channel = c;
        else if (force_spec.channelnames[c] == "Zback")
            zback_channel = c;
    }
    bool add_z_channel = (force_spec.z_channel < 0);
    if (add_z_channel) {
        // No z channel? Make one.
        force_spec.z_channel = force_spec.nchannels++;
        force_spec.channelnames.push_back ("Z");
    }
    if (! IBAprep (roi, &dst, &src, NULL, &force_spec, IBAprep_SUPPORT_DEEP))
        return false;
    if (! dst.deep()) {
        dst.error ("Cannot deepen to a flat image");
        return false;
    }
    float *pixel = OIIO_ALLOCA (float, nc);
    // First, figure out which pixels get a sample and which do not
    for (int z = roi.zbegin; z < roi.zend; ++z)
        for (int y = roi.ybegin; y < roi.yend; ++y)
            for (int x = roi.xbegin; x < roi.xend; ++x) {
                bool has_sample = false;
                src.getpixel (x, y, z, pixel);
                for (int c = 0; c < nc; ++c)
                    if (c != force_spec.z_channel && c != zback_channel
                          && pixel[c] != 0.0f) {
                        has_sample = true;
                        break;
                    }
                if (! has_sample && ! add_z_channel)
                    for (int c = 0; c < nc; ++c)
                        if ((c == force_spec.z_channel || c == zback_channel)
                              && (pixel[c] != 0.0f && pixel[c] < 1e30)) {
                            has_sample = true;
                            break;
                        }
                if (has_sample)
                    dst.set_deep_samples (x, y, z, 1);
            }
    dst.deep_alloc ();
    // Now actually set the values
    for (int z = roi.zbegin; z < roi.zend; ++z)
        for (int y = roi.ybegin; y < roi.yend; ++y)
            for (int x = roi.xbegin; x < roi.xend; ++x) {
                if (dst.deep_samples (x, y, z) == 0)
                    continue;
                for (int c = 0; c < nc; ++c)
                    dst.set_deep_value (x, y, z, c, 0 /*sample*/,
                                        src.getchannel (x, y, z, c));
                if (add_z_channel)
                    dst.set_deep_value (x, y, z, nc, 0, zvalue);
            }
    bool ok = true;
    // FIXME -- the above doesn't split into threads. Someday, it should
    // be refactored like this:
    //     OIIO_DISPATCH_COMMON_TYPES2 (ok, "deepen", deepen_,
    //                                  dst.spec().format, srcspec.format,
    //                                  dst, src, add_z_channel, z, roi, nthreads);
    return ok;
}