本文整理汇总了C++中ImageSize类的典型用法代码示例。如果您正苦于以下问题:C++ ImageSize类的具体用法?C++ ImageSize怎么用?C++ ImageSize使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ImageSize类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: ExtractSubImageWithBoundary
/**
 * \brief Extract a sub-image from an input image, padding with symmetrized
 *        boundary pixels where the requested window leaves the input.
 *
 * \param im_input         input pixels, laid out according to imSize_input
 * \param imSize_input     geometry of the input image
 * \param imSize_output    geometry of the sub-image to extract
 * \param h_offset         vertical position of the window in the input
 * \param w_offset         horizontal position of the window in the input
 * \param boundary_length  width of the symmetrized boundary band
 * \param im_output        receives the extracted sub-image (resized here)
 * \return EXIT_SUCCESS
 */
int ExtractSubImageWithBoundary(
    const vector<float> &im_input,
    const ImageSize &imSize_input,
    const ImageSize &imSize_output,
    const size_t &h_offset,
    const size_t &w_offset,
    const size_t &boundary_length,
    vector<float> *im_output
    )
{
    //! Bug fix: compute the signed shift once. The original evaluated
    //! h - boundary_length + h_offset with size_t operands, so whenever
    //! h < boundary_length the subtraction wrapped around to a huge
    //! unsigned value; relying on the conversion back to a signed
    //! parameter of symmetrise_at is implementation-defined (pre-C++20).
    //! NOTE(review): assumes symmetrise_at takes signed coordinates and
    //! mirrors negative/out-of-range indices — confirm against its decl.
    const int h_shift = static_cast<int>(h_offset) - static_cast<int>(boundary_length);
    const int w_shift = static_cast<int>(w_offset) - static_cast<int>(boundary_length);
    im_output->resize(imSize_output.whc);
    for(int c=0; c<imSize_output.nChannels; ++c)
    {
        for(int w=0; w<imSize_output.width; ++w)
        {
            for(int h=0; h<imSize_output.height; ++h)
            {
                //! symmetrise_at folds out-of-range coordinates back into
                //! the input image, implementing the boundary padding
                im_output->at(imSize_output.at(h,w,c))
                    = im_input[imSize_input.symmetrise_at(h + h_shift, w + w_shift, c)];
            }
        }
    }
    return EXIT_SUCCESS;
}
示例2: debug
/**
 * \brief Verify that a WindowAdapter exposes exactly the pixels of its window.
 *
 * Fills a 16x16 image with values derived from the pixel coordinates,
 * wraps an 8x8 window at (4,4) in an adapter, and checks that every
 * adapter pixel equals the value computed from the absolute coordinates.
 */
void WindowAdapterTest::testWindowAdapter() {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "window adapter test");
	// build a test image whose pixel value encodes its coordinates
	Image<unsigned char> image(16, 16);
	for (unsigned int u = 0; u < 16; u++) {
		for (unsigned int v = 0; v < 16; v++) {
			image.pixel(u, v) = u * v;
		}
	}
	// define the window we want to look at
	ImageRectangle frame(ImagePoint(4, 4), ImageSize(8, 8));
	debug(LOG_DEBUG, DEBUG_LOG, 0, "frame: %s", frame.toString().c_str());
	// restrict the view to the window through an adapter
	WindowAdapter<unsigned char> adapter(image, frame);
	// the adapter must report the window size
	ImageSize size = adapter.getSize();
	debug(LOG_DEBUG, DEBUG_LOG, 0, "adapter size: %s",
		size.toString().c_str());
	// compare every adapter pixel against the value recomputed from
	// the absolute coordinates in the source image
	for (int x = 0; x < size.width(); x++) {
		for (int y = 0; y < size.height(); y++) {
			const unsigned char actual = adapter.pixel(x, y);
			const unsigned char expected
				= (frame.origin().x() + x) * (frame.origin().y() + y);
			if (expected != actual) {
				debug(LOG_DEBUG, DEBUG_LOG, 0, "expected %d != %d found",
					(int)expected, (int)actual);
			}
			CPPUNIT_ASSERT(actual == expected);
		}
	}
	debug(LOG_DEBUG, DEBUG_LOG, 0, "window adapter test complete");
}
示例3: PartialRecomposeImageWithBoundaries
/**
 * \brief Copy the interior of a sub-image tile back into the output image,
 *        dropping the boundary band that was added at extraction time.
 *
 * \param im_subdivide        the tile to copy back
 * \param imSize_subdivide    geometry of the tile
 * \param imSize_output       geometry of the destination image
 * \param h_offset            vertical destination position of the tile
 * \param w_offset            horizontal destination position of the tile
 * \param boundary_length     boundary band skipped when reading the tile
 * \param boundary_w_length   columns trimmed from the tile width
 * \param boundary_h_length   rows trimmed from the tile height
 * \param im_output           destination image (written in place)
 * \return EXIT_SUCCESS
 */
int PartialRecomposeImageWithBoundaries(
    const vector<float> &im_subdivide,
    const ImageSize &imSize_subdivide,
    const ImageSize &imSize_output,
    const int &h_offset,
    const int &w_offset,
    const int &boundary_length,
    const int &boundary_w_length,
    const int &boundary_h_length,
    vector<float> *im_output
    )
{
    //! Interior extent of the tile once the trim bands are removed
    const int nb_cols = imSize_subdivide.width  - boundary_w_length;
    const int nb_rows = imSize_subdivide.height - boundary_h_length;
    for(int c=0; c<imSize_output.nChannels; ++c)
    {
        for(int w=0; w<nb_cols; ++w)
        {
            //! Columns falling outside the destination are dropped
            if(w+w_offset >= imSize_output.width) continue;
            for(int h=0; h<nb_rows; ++h)
            {
                //! Rows falling outside the destination are dropped
                if(h+h_offset >= imSize_output.height) continue;
                const size_t dst = imSize_output.at(h+h_offset, w+w_offset, c);
                const size_t src = imSize_subdivide.at(h+boundary_length, w+boundary_length, c);
                im_output->at(dst) = im_subdivide[src];
            }
        }
    }
    return EXIT_SUCCESS;
}
示例4: String
/**
 * \brief Format an image size as the string "<width>x<height>".
 *
 * \param imageSize  the size to format
 * \return formatted string, e.g. "640x480"
 */
std::string ImageSize::
String(ImageSize imageSize)
{
	std::ostringstream formatted;
	formatted << imageSize.width() << "x" << imageSize.height();
	return formatted.str();
}
示例5: result
/**
 * \brief Compute size of the complex fourier transform image
 *
 * The fftw3 real-data DFT routines index arrays with the slowly varying
 * coordinate first, while our image types use the quickly varying
 * horizontal coordinate first. An image of width w and height h is
 * therefore handed to fftw3 as a data array with n0 = h and n1 = w.
 * The real-to-complex transform of such an array has n0 rows of
 * (n1/2 + 1) complex entries; storing each complex entry as two reals
 * yields an image of width 2 * (n1/2 + 1) and height n0.
 *
 * This layout only matters if the fourier transform is inspected as an
 * image in its own right; for round-trip transforms it is opaque.
 *
 * \param size size of the image to be fourier transformed
 */
ImageSize FourierImage::fsize(const ImageSize& size) {
	// fftw3 dimensions: n0 = slow (image height), n1 = fast (image width)
	const int n0 = size.height();
	const int n1 = size.width();
	// (n1/2 + 1) complex values per row, two reals per complex value
	ImageSize result(2 * (1 + n1 / 2), n0);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "fourier image size %s -> %s",
		size.toString().c_str(), result.toString().c_str());
	return result;
}
示例6: ExtractPatches
/**
 * \brief Extract every length x length multi-channel patch of the image.
 *
 * One patch is produced per anchor pixel (h, w) with
 * 0 <= h < height-length and 0 <= w < width-length; patches are stored
 * contiguously, channel-major inside each patch.
 *
 * \param im_input  input pixels, laid out according to imSize
 * \param imSize    geometry of the input image
 * \param length    patch side length
 * \return flat vector of (height-length)*(width-length) patches
 */
vector<float> ExtractPatches(
    const vector<float> &im_input,
    const ImageSize &imSize,
    const int length
    )
{
    const int dim_patch = length*length*imSize.nChannels;
    const int nb_rows = imSize.height-length;
    const int nb_cols = imSize.width-length;
    vector<float> patches(static_cast<size_t>(nb_rows)*nb_cols*dim_patch);
    for(int w=0; w<nb_cols; ++w)
    {
        for(int h=0; h<nb_rows; ++h)
        {
            //! Start of the patch anchored at (h, w) in the output
            float *patch = &patches[static_cast<size_t>(h + w*nb_rows)*dim_patch];
            for(int c=0; c<imSize.nChannels; ++c)
                for(int j=0; j<length; ++j)
                    for(int i=0; i<length; ++i)
                        patch[i + j*length + c*length*length]
                            = im_input[imSize.at(h+i, w+j, c)];
        }
    }
    return patches;
}
示例7: ExtractCenteredPatches
/**
 * \brief Extract every (2l+1) x (2l+1) single-channel patch centered on an
 *        interior pixel of the image, one patch per pixel and per channel.
 *
 * Bug fix: the original declared `int c = 0;` but never iterated over the
 * channels, even though the output buffer is allocated for all nChannels
 * and the pixel index includes a c*im_wh term — every channel beyond the
 * first was silently left zero-filled. A channel loop is now in place.
 *
 * \param im_input  input pixels, laid out according to imSize
 * \param imSize    geometry of the input image
 * \param l         patch half-side; the patch side is 2l+1
 * \return flat vector of (height-2l)*(width-2l)*nChannels patches
 */
vector<float> ExtractCenteredPatches(
    const vector<float> &im_input,
    const ImageSize &imSize,
    const int l
    )
{
    const int length = 2*l+1;              //! patch side
    const int dim_patch = length*length;   //! values per patch
    const int im_height = imSize.height-2*l;
    const int im_width = imSize.width-2*l;
    const int im_wh = im_height*im_width;
    const int number_patch = im_height*im_width*imSize.nChannels;
    vector<float> patches(number_patch*dim_patch);
    for(int c=0; c<imSize.nChannels; ++c)
    {
        for(int w=l; w<imSize.width-l; ++w)
        {
            for(int h=l; h<imSize.height-l; ++h)
            {
                for(int j=-l; j<=l; ++j)
                {
                    for(int i=-l; i<=l; ++i)
                    {
                        //! Position inside the patch and index of the patch
                        size_t pos_patch = i+l + (j+l)*length;
                        size_t pos_pixel = (h-l) + (w-l)*im_height + c*im_wh;
                        patches[pos_pixel*dim_patch + pos_patch] = im_input[imSize.at(h+i, w+j, c)];
                    }
                }
            }
        }
    }
    return patches;
}
示例8: lock
/**
 * \brief Start an exposure on the QSI camera.
 *
 * Applies the binning mode and the (binned) subframe from the exposure
 * specification, turns off the camera LED, and triggers the exposure.
 * Any error reported by the QSI library during setup is logged, the
 * exposure is cancelled, and a BadParameter exception is thrown.
 *
 * \param exposure  exposure parameters: frame, binning mode, time, shutter
 * \throws BadParameter if the QSI library rejects the parameters
 */
void QsiCcd::startExposure(const Exposure& exposure) {
	// hold the camera lock for the entire setup sequence so no other
	// thread talks to the device in between (recursive: nested camera
	// calls on this thread may lock again)
	std::unique_lock<std::recursive_mutex> lock(_camera.mutex);
	Ccd::startExposure(exposure);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "start QSI exposure");
	try {
		// set the binning mode
		_camera.camera().put_BinX(exposure.mode().x());
		_camera.camera().put_BinY(exposure.mode().y());
		// compute the frame size in binned pixels, as this is what
		// the QSI camera expects
		ImagePoint origin = exposure.frame().origin() / exposure.mode();
		ImageSize size = exposure.frame().size() / exposure.mode();
		ImageRectangle frame(origin, size);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "requesting %s image",
			frame.toString().c_str());
		// set the subframe
		_camera.camera().put_NumX(size.width());
		_camera.camera().put_NumY(size.height());
		_camera.camera().put_StartX(origin.x());
		_camera.camera().put_StartY(origin.y());
		// turn off the led
		debug(LOG_DEBUG, DEBUG_LOG, 0, "turn LED off");
		_camera.camera().put_LEDEnabled(false);
		// get shutter info: a light frame needs the shutter open
		bool light = (exposure.shutter() == Shutter::OPEN);
		_camera.camera().StartExposure(exposure.exposuretime(), light);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "%fsec %s exposure started",
			exposure.exposuretime(), (light) ? "light" : "dark");
	} catch (const std::exception& x) {
		debug(LOG_ERR, DEBUG_LOG, 0, "bad exposure parameters: %s",
			x.what());
		// leave the camera in a sane (non-exposing) state before
		// reporting the failure to the caller
		cancelExposure();
		throw BadParameter(x.what());
	}
	// check the current state of the camera
	// NOTE(review): presumably exposureStatus() also updates the Ccd
	// state machine to EXPOSING — confirm against its implementation
	exposureStatus();
}
示例9: runtime_error
/**
 * \brief Compute translation residuals on a grid of patches of an image.
 *
 * Lays out a grid of patch centers with the configured spacing, centered
 * inside the image, computes the translation at each grid point, and
 * returns the residuals that came back valid.
 *
 * \param image  image to analyze
 * \return vector of valid residuals, one per usable grid point
 * \throws std::runtime_error if a patch does not fit inside the image
 */
std::vector<Residual> Analyzer::operator()(const ConstImageAdapter<double>& image) const {
	// query the size once and reuse it (the original called getSize()
	// three times)
	ImageSize size = image.getSize();
	// first find out whether the patch size fits inside the image
	if ((patchsize > size.width()) || (patchsize > size.height())) {
		throw std::runtime_error("patch size does not fit into image");
	}
	// build a set of patch center points, centered inside the image
	int hsteps = (size.width() - patchsize) / spacing;
	int xoffset = (size.width() - hsteps * spacing) / 2;
	int vsteps = (size.height() - patchsize) / spacing;
	int yoffset = (size.height() - vsteps * spacing) / 2;
	std::vector<ImagePoint> points;
	for (int h = 0; h <= hsteps; h++) {
		for (int v = 0; v <= vsteps; v++) {
			ImagePoint point(xoffset + h * spacing,
				yoffset + v * spacing);
			points.push_back(point);
		}
	}
	// now compute the shift for each point, keeping only valid results
	std::vector<Residual> result;
	for (auto pt = points.begin(); pt != points.end(); ++pt) {
		Residual residual = translation(image, *pt, patchsize);
		if (residual.valid()) {
			result.push_back(residual);
		}
	}
	// display resulting residuals if in debug mode
	if (debuglevel >= LOG_DEBUG) {
		for (std::vector<Residual>::size_type i = 0; i < result.size();
			i++) {
			// bug fix: size_type is an unsigned type (usually 64 bit)
			// but %d expects an int; passing it unconverted through
			// the varargs call is undefined behavior — cast explicitly
			debug(LOG_DEBUG, DEBUG_LOG, 0, "residual[%d] %s", (int)i,
				std::string(result[i]).c_str());
		}
	}
	return result;
}
示例10: imagedata
/**
* \brief Write a processed image to a file
*/
void Viewer::writeimage(const std::string& filename) {
ImageSize size = image->size();
Image<RGB<unsigned char> > *outimage
= new Image<RGB<unsigned char> >(size);
unsigned int width = size.width();
unsigned int height = size.height();
uint32_t *i = imagedata();
for (unsigned int x = 0; x < width; x++) {
for (unsigned int y = 0; y < height; y++) {
uint32_t v = i[size.offset(x, y)];
unsigned char R = (v & 0xff0000) >> 16;
unsigned char G = (v & 0x00ff00) >> 8;
unsigned char B = (v & 0x0000ff);
//debug(LOG_DEBUG, DEBUG_LOG, 0, "%d,%d,%d", R, G, B);
outimage->pixel(x, y) = RGB<unsigned char>(R, G, B);
}
}
FITSout out(filename);
out.setPrecious(false);
out.write(ImagePtr(outimage));
}
示例11:
void ImageSizeTest::testBounds() {
debug(LOG_DEBUG, DEBUG_LOG, 0, "testBounds() begin");
CPPUNIT_ASSERT(i1->bounds(ImagePoint(0, 0)));
CPPUNIT_ASSERT(i1->bounds(ImagePoint(6, 0)));
CPPUNIT_ASSERT(i1->bounds(ImagePoint(0, 10)));
CPPUNIT_ASSERT(i1->bounds(ImagePoint(6, 10)));
CPPUNIT_ASSERT(!i1->bounds(ImagePoint(0, -1)));
CPPUNIT_ASSERT(!i1->bounds(ImagePoint(6, -1)));
CPPUNIT_ASSERT(!i1->bounds(ImagePoint(0, 11)));
CPPUNIT_ASSERT(!i1->bounds(ImagePoint(6, 11)));
CPPUNIT_ASSERT(!i1->bounds(ImagePoint(-1, 0)));
CPPUNIT_ASSERT(!i1->bounds(ImagePoint(-1, 10)));
CPPUNIT_ASSERT(!i1->bounds(ImagePoint(7, 0)));
CPPUNIT_ASSERT(!i1->bounds(ImagePoint(7, 10)));
debug(LOG_DEBUG, DEBUG_LOG, 0, "testBounds() end");
}
示例12: subdark
/**
 * \brief Mark outlier pixels of a subgrid as bad by writing NaNs.
 *
 * Computes mean and variance of the subgrid and replaces every pixel
 * that deviates from the mean by more than k standard deviations with
 * a quiet NaN. Pixels that already are NaN are skipped.
 *
 * \param im    image mean accumulator whose image is modified in place
 * \param grid  subgrid to process
 * \param k     outlier threshold in standard deviations (default 3)
 * \return number of pixels newly marked bad
 */
size_t subdark(const ImageSequence&, ImageMean<T>& im,
	const Subgrid grid, unsigned int k = 3) {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "processing subgrid %s",
		grid.toString().c_str());
	// we also need the mean of the image to decide which pixels are
	// too far off to consider them "sane" pixels
	T mean = im.mean(grid);
	T var = im.variance(grid);
	// now find out which pixels are bad, and mark them using NaNs.
	// we consider pixels bad if they deviate from the mean by more
	// than k standard deviations
	T stddevk = k * sqrt(var);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "found mean: %f, variance: %f, "
		"stddev%u = %f", mean, var, k, stddevk);
	size_t badpixelcount = 0;
	SubgridAdapter<T> sga(*im.image, grid);
	ImageSize size = sga.getSize();
	for (int x = 0; x < size.width(); x++) {
		for (int y = 0; y < size.height(); y++) {
			T v = sga.pixel(x, y);
			// skip NaNs (a NaN compares unequal to itself).
			// bug fix: the original used break here, which aborted
			// the rest of the column as soon as one NaN was seen and
			// left later outliers in that column unmarked
			if (v != v) {
				continue;
			}
			if (fabs(v - mean) > stddevk) {
				sga.writablepixel(x, y)
					= std::numeric_limits<T>::quiet_NaN();
				badpixelcount++;
			}
		}
	}
	// bug fix: badpixelcount is size_t but %u expects unsigned int;
	// passing size_t unconverted through varargs is undefined behavior
	debug(LOG_DEBUG, DEBUG_LOG, 0, "found %u bad pixels",
		(unsigned int)badpixelcount);
	return badpixelcount;
}
示例13: saveImage
/**
 * \brief Clamp an image to [p_min, p_max] and save it as a PNG file.
 *
 * The input uses the library layout (indexed through p_imSize.at); it is
 * reordered into the planar, row-major layout expected by write_png_f32.
 *
 * \param p_name   output file path
 * \param i_im     input pixels, laid out according to p_imSize
 * \param p_imSize geometry of the image
 * \param p_min    minimum representable value (values below are clamped)
 * \param p_max    maximum representable value (values above are clamped)
 * \return EXIT_SUCCESS, or EXIT_FAILURE if the PNG could not be written
 */
int saveImage(
    const char *p_name,
    const vector<float> &i_im,
    const ImageSize &p_imSize,
    const float &p_min,
    const float &p_max
    )
{
    //! Bug fix: use a vector instead of new[]; the original returned
    //! EXIT_FAILURE on a write error without delete[], leaking the buffer
    vector<float> imTmp(p_imSize.whc);
    //! Reorder into the planar c-major, then row-major layout of the writer
    for(int c=0; c<p_imSize.nChannels; ++c)
    {
        for(int h=0; h<p_imSize.height; ++h)
        {
            for(int w=0; w<p_imSize.width; ++w)
            {
                imTmp[c*p_imSize.wh + h*p_imSize.width + w] = i_im[p_imSize.at(h,w,c)];
            }
        }
    }
    //! Clamp all values into the representable range
    for (int k = 0; k < p_imSize.whc; k++)
    {
        imTmp[k] = imTmp[k] < p_min ? p_min : (imTmp[k] > p_max ? p_max : imTmp[k]);
    }
    if (write_png_f32(p_name, imTmp.data(), p_imSize.width, p_imSize.height, p_imSize.nChannels) != 0)
    {
        cout << "... failed to save png image :'" << p_name << "'" << endl;
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
示例14:
/**
 * \brief Compare two size objects: inequality
 *
 * Two size objects differ exactly when they do not agree in both
 * width and height.
 */
bool ImageSize::operator!=(const ImageSize& other) const {
	return !((_width == other.width()) && (_height == other.height()));
}
示例15: debug
/**
 * \brief Main function of the Focusing process
 *
 * Scans steps() focuser positions evenly spaced between min() and max(),
 * takes an exposure at each position, reduces each image to an FWHM
 * value, fits the resulting V-curve and finally moves the focuser to
 * the computed optimum. The Focusing status is updated along the way
 * and set to FAILED if no plausible optimum is found.
 *
 * \throws std::runtime_error if the work object is incompletely
 *         configured or the requested minimum is below the focuser range
 */
void VCurveFocusWork::main(astro::thread::Thread<FocusWork>& /* thread */) {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "start focusing work");
	// refuse to run unless ccd, focuser and exposure are all configured
	if (!complete()) {
		focusingstatus(Focusing::FAILED);
		throw std::runtime_error("focuser not completely specified");
	}
	FocusCompute fc;
	// determine how many intermediate steps we want to access
	if (min() < focuser()->min()) {
		throw std::runtime_error("minimum too small");
	}
	// based on the exposure specification, build an evaluator
	// NOTE(review): evaluator appears unused below — measurement goes
	// through focusFWHM2_extended instead; confirm whether it is needed
	ImageSize size = exposure().size();
	int radius = std::min(size.width(), size.height()) / 2;
	FWHM2Evaluator evaluator(size.center(), radius);
	// NOTE(review): steps() == 1 would divide by zero in the position
	// formula below — presumably callers guarantee steps() >= 2; confirm
	unsigned long delta = max() - min();
	for (int i = 0; i < steps(); i++) {
		// compute new position: i-th of steps() evenly spaced points
		// in [min(), max()]
		unsigned short position = min() + (i * delta) / (steps() - 1);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "measuring position %hu",
			position);
		// move to new position
		moveto(position);
		// get an image from the Ccd
		focusingstatus(Focusing::MEASURING);
		ccd()->startExposure(exposure());
		// wait out the exposure time before polling the ccd
		usleep(1000000 * exposure().exposuretime());
		ccd()->wait();
		ImagePtr image = ccd()->getImage();
		// turn the image into a value: the FWHM radius around the
		// image center
		FWHMInfo fwhminfo = focusFWHM2_extended(image,
			size.center(), radius);
		double value = fwhminfo.radius;
		// add the new (position, FWHM) measurement to the V-curve fit
		fc.insert(std::pair<unsigned short, double>(position, value));
		// send the callback data
		callback(combine(image, fwhminfo), position, value);
	}
	// compute the best focus position from the fitted V-curve
	double focusposition = 0;
	try {
		focusposition = fc.focus();
	} catch (std::exception& x) {
		debug(LOG_DEBUG, DEBUG_LOG, 0, "no optimal focus position: %s",
			x.what());
		focusingstatus(Focusing::FAILED);
		return;
	}
	debug(LOG_DEBUG, DEBUG_LOG, 0, "optimal focus position: %f",
		focusposition);
	// plausibility check for the position: it must lie inside the
	// scanned interval
	if (!((focusposition >= min()) && (focusposition <= max()))) {
		focusingstatus(Focusing::FAILED);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "focusing failed");
		return;
	}
	// move to the focus position (truncated to the focuser's unit)
	unsigned short targetposition = focusposition;
	moveto(targetposition);
	focusingstatus(Focusing::FOCUSED);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "target position reached");
}