This article collects typical usage examples of the C++ method image_type::width. If you are wondering what exactly image_type::width does in C++, how to call it, or how it is used in practice, the curated code examples here may help. You can also explore further usage examples of the containing class image_type.
The following presents 10 code examples of the image_type::width method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
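Before the examples, here is a minimal sketch of the typical pattern: width() and height() bound a double loop over all pixels. The image_type used here is only a stand-in with the interface the examples below rely on (width(), height() and operator()(x, y)); it is not a specific library class.
#include <cstddef>
// Minimal sketch (assumed interface, not a specific library):
// zero every pixel by walking rows with height() and columns with width().
template <typename image_type>
void clear_image(image_type &image)
{
    for (std::size_t y = 0; y < image.height(); ++y)
        for (std::size_t x = 0; x < image.width(); ++x)
            image(x, y) = 0;
}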
Example 1: Image2DIB
void Image2DIB(const image_type &image, BYTE *dib)
{
    using namespace rss;
    // 14-byte BITMAPFILEHEADER + 40-byte BITMAPINFOHEADER + 1024-byte palette (256 RGBQUAD entries)
    const int all_header_size = 14 + 40 + 1024;
    // each row of an 8-bit DIB is padded to a multiple of 4 bytes
    size_t dib_size = all_header_size
        + (image.width() + ((image.width() % 4) ? (4 - image.width() % 4) : 0)) * image.height();
    std::ostrstream output(reinterpret_cast<char *>(dib), dib_size);
    BMPImageIO<image_type> image_io;
    if(!image_io.write(output, image))
        throw rss::Exception("library cannot write this dib");
}
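The padded row width in Example 1 follows the BMP rule that every scanline of an 8-bit image occupies a multiple of 4 bytes. The same quantity can be written more compactly; the helper below is only a sketch (bmp_row_stride is a hypothetical name, not part of the rss library):
#include <cstddef>
// Hypothetical helper: bytes per scanline of an 8-bit BMP, rounded up to a 4-byte boundary.
inline std::size_t bmp_row_stride(std::size_t width)
{
    return (width + 3) & ~static_cast<std::size_t>(3);
}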
Example 2: add_image
stereoview::image_id_type stereoview::add_image(const image_type& img, const option<float>& focal_length)
{
    if (!stored_image_db.empty() && (img.width() != width() || img.height() != height()))
        throw localized_invalid_argument(HERE(nfmt<4>("image size is %1 x %2 while expected size is %3 x %4")
                                              (img.width()) (img.height()) (width()) (height())));
    const unsigned int id = fresh_int();
    add_stored_image(id, img, focal_length);
    return id;
}
Example 3: fft_shift_x
void fft_shift_x(image_type& I)
{
    int half_w = I.width() >> 1;
    int half_w_1 = half_w - 1;
    int quarter_w = half_w >> 1;
    typename image_type::iterator iter1 = I.begin();
    typename image_type::iterator end = I.end();
    // process one row per iteration
    for(; iter1 != end; iter1 += I.width())
    {
        typename image_type::iterator iter2 = iter1 + half_w;
        // reverse the left half and the right half of the row in place
        for(int x = 0, rx = half_w_1; x < quarter_w; ++x, --rx)
        {
            std::swap(iter1[x], iter1[rx]);
            std::swap(iter2[x], iter2[rx]);
        }
    }
}
Example 4: image
TestRegionGrowing( ) : image( 10, 10 )
{
    for( size_type j = 1 ; j < image.height( ) - 1 ; j++ )
    {
        for( size_type i = 1 ; i < image.width( ) - 1 ; i++ )
        {
            image( i, j ) = 1;
        }
    }
}
Example 5: RSS_MultiSensor_FFT_Register
void RSS_MultiSensor_FFT_Register(const image_type &VLImage, const image_type &IRImage, float para[6], image_type &result)
{
    using namespace rss;
    image_type temp;
    if(para) {
        // derive an initial isotropic scale from the sensor parameters and the image sizes,
        // then pre-scale the IR image about its centre
        double scale1 = para[0] / para[3] * para[4] / para[1] * VLImage.width() / IRImage.width();
        double scale2 = para[0] / para[3] * para[5] / para[2] * VLImage.height() / IRImage.height();
        double scale = sqrt(scale1 * scale2);
        HomoModel pre_trans_model;
        pre_trans_model.SetSimilarity(scale, 0, IRImage.width() * (1 - scale) / 2.0, IRImage.height() * (1 - scale) / 2.0);
        HomoTrans<image_type> pre_trans(pre_trans_model, VLImage.size());
        pre_trans(IRImage, temp);
    } else {
        temp = IRImage;
    }
    // estimate a similarity transform between the VL image and the (pre-scaled) IR image,
    // then warp the IR image into result
    HomoModel model;
    SimilarityEstimation(true, true, false, SimilarityEstimation::OP_NONE, SimilarityEstimation::FILTER_NONE)(VLImage, temp, model);
    HomoTrans<image_type> homo_trans(model, VLImage.size());
    homo_trans(temp, result);
}
Example 6: RSS_MultiSensor_PCA_Fusion
void RSS_MultiSensor_PCA_Fusion(const image_type &image1, const image_type &image2, image_type &result)
{
    using namespace rss;
    rss::ImageVector<image_type> input_vector;
    input_vector.push_back(image1);
    if(image1.size() != image2.size()) {
        BilinearInterpolation<image_type> interpolate(image1.size());
        image_type temp;
        interpolate(image2, temp);
        input_vector.push_back(temp);
    } else {
        input_vector.push_back(image2);
    }
    PCAFusion<image_type> fusion(image1.width(), image1.height());
    fusion(input_vector, result);
}
Example 7: fft_shift_y
void fft_shift_y(image_type& I)
{
    int w = I.width();
    int half_wh = I.plane_size() >> 1;
    int half_wh_1 = half_wh - w;   // offset of the last row in the top half
    int quarter_wh = half_wh >> 1;
    typename image_type::iterator iter1 = I.begin();
    typename image_type::iterator end = I.end();
    // process one plane (slice) per iteration
    for(; iter1 != end; iter1 += I.plane_size())
    {
        typename image_type::iterator iter1_x = iter1;
        typename image_type::iterator iter1_x_end = iter1 + w;
        typename image_type::iterator iter2_x = iter1_x + half_wh;
        // for every column, reverse the rows of the top half and of the bottom half in place
        for(; iter1_x != iter1_x_end; ++iter1_x, ++iter2_x)
            for(int y = 0, ry = half_wh_1; y < quarter_wh; y += w, ry -= w)
            {
                std::swap(iter1_x[y], iter1_x[ry]);
                std::swap(iter2_x[y], iter2_x[ry]);
            }
    }
}
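Examples 3 and 7 each rearrange the image along one axis only; code that needs both directions would simply call them in sequence. The wrapper below is just a usage sketch (fft_shift_xy is a hypothetical name, not part of the library above):
// Sketch only: apply the x shift and the y shift from Examples 3 and 7 in sequence.
void fft_shift_xy(image_type& spectrum)
{
    fft_shift_x(spectrum);  // rearrange values within each row
    fft_shift_y(spectrum);  // rearrange rows within each plane
}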
Example 8: RSS_MultiSensor_Fleet_Region
void RSS_MultiSensor_Fleet_Region(const image_type &VLImage, const image_type &IRImage, image_type &result1, image_type &result2, float regions[61])
{
    using namespace rss;
    rss::FleetEdgeDetector detector;
    detector(IRImage, result1);
    // result1 will hold the fusion image
    RSS_MultiSensor_Wavelet_Fusion(VLImage, IRImage, result1);
    // render the fused image into a memory DC
    HDC hScreenDC = ::GetDC(NULL);
    HDC m_hDC = CreateCompatibleDC(hScreenDC);
    HBITMAP m_hBitmap = CreateCompatibleBitmap(hScreenDC, result1.width(), result1.height());
    ::ReleaseDC(NULL, hScreenDC);
    HPEN m_hPen = CreatePen(PS_SOLID, 1, RGB(255,255,255));
    HBRUSH m_hBrush = (HBRUSH)GetStockObject(NULL_BRUSH);
    SelectObject(m_hDC, m_hBitmap);
    SelectObject(m_hDC, m_hPen);
    SelectObject(m_hDC, m_hBrush);
    SetBkMode(m_hDC, TRANSPARENT);
    SetTextColor(m_hDC, RGB(255,255,255));
    for (int x = 0; x < result1.width(); x++) {
        for (int y = 0; y < result1.height(); y++) {
            int gray = rss::pixel_cast<rss::GrayPixel>(result1(x,y));
            COLORREF color = RGB(gray, gray, gray);
            SetPixel(m_hDC, x, y, color);
        }
    }
    // export up to 20 detected regions as (x, y, reliability) triples; regions[0] holds the count
    ObjectiveRegions r = detector.objective_region();
    const size_t region_count = min(size_t(20), r.size());
    regions[0] = static_cast<float>(region_count);
    for (size_t i = 0; i < region_count; i++) {
        regions[i*3 + 1] = r[i].center.x();
        regions[i*3 + 2] = r[i].center.y();
        regions[i*3 + 3] = r[i].reliability;
    }
    // fill the background of result2 with black
    result2.resize(result1.size());
    for (int x = 0; x < result2.width(); x++)
        for (int y = 0; y < result2.height(); y++)
            result2(x, y) = 0;
    // fill the sensitive regions
    for (size_t i = 0; i < region_count; i++) {
        ::Rectangle(m_hDC, r[i].region.left(), r[i].region.top(), r[i].region.right(), r[i].region.bottom());
        int dx = (r[i].region.right() - r[i].region.left()) / 2;
        int dy = (r[i].region.bottom() - r[i].region.top()) / 2;
        char* str = RSS_MultiSensor_Get_String(static_cast<int>(i) + 1);
        TextOut(m_hDC, r[i].region.left() + dx, r[i].region.top() + dy, str, static_cast<int>(strlen(str)));
        // crop the region from the VL image and apply region growing to it
        image_type region;
        region.resize(r[i].region.size());
        for (int x = r[i].region.left(); x < r[i].region.right(); x++) {
            for (int y = r[i].region.top(); y < r[i].region.bottom(); y++) {
                region(x - r[i].region.left(), y - r[i].region.top()) = VLImage(x, y);
            }
        }
        RegionGrow<image_type> regionGrow(1.0/5.0, 10);
        image_type region_result;
        regionGrow(region, region_result);
        for (int x = r[i].region.left(); x < r[i].region.right(); x++) {
            for (int y = r[i].region.top(); y < r[i].region.bottom(); y++) {
                result2(x, y) = region_result(x - r[i].region.left(), y - r[i].region.top());
            }
        }
    }
    // copy the memory DC contents (with rectangles and labels) back into result1
    for (int x = 0; x < VLImage.width(); x++) {
        for (int y = 0; y < VLImage.height(); y++) {
            result1(x, y) = rss::pixel_cast<rss::RealPixel>(GetRValue(GetPixel(m_hDC, x, y)));
        }
    }
    DeleteDC(m_hDC);
    DeleteObject(m_hBitmap);
    DeleteObject(m_hPen);
    DeleteObject(m_hBrush);
}
Example 9:
void stereoview::stored_image::image_to_gray_raw(const image_type& img, unsigned char* raw)
{
    for (int y = 0; y < img.height(); ++y)
        for (int x = 0; x < img.width(); ++x)
            *raw++ = qGray(img.pixel(x, y));
}
Example 10: main
int main( int argc , char *argv[ ] )
{
    // build the input volume data
    // (va, mcs, pv, nv, sv, disp and idle are defined elsewhere in the sample)
    const vector_type c( ( va.width( ) - 1 ) / 2.0, ( va.height( ) - 1 ) / 2.0, ( va.depth( ) - 1 ) / 2.0 );
    vector_type c0( c ), c1( c );
    c0.x -= 8.0;
    c1.x += 8.0;
    for( size_t k = 0 ; k < va.depth( ) ; k ++ )
    {
        for( size_t j = 0 ; j < va.height( ) ; j ++ )
        {
            for( size_t i = 0 ; i < va.width( ) ; i ++ )
            {
                const vector_type p( static_cast< double >( i ), static_cast< double >( j ), static_cast< double >( k ) );
                const double d0 = distance( p, c0 );
                const double d1 = distance( p, c1 );
                va( i, j, k ) = 31.5 - minimum( d0, d1 );
                if( va( i, j, k ) < 0.0 )
                {
                    va( i, j, k ) = 0.0;
                }
            }
        }
    }
    // preprocessing for isosurface generation (pass in the input volume data);
    // it shortens generation time when, as in this sample, the threshold is changed dynamically on every redraw
    // (comment this line out and compare to see the difference in processing speed)
    mcs.preprocess( va );
    // set the isosurface generation parameters
    mcs.offset( -31.5, -15.5, -15.5 );
    mcs.scale( 0.1, 0.1, 0.1 );
    // reserve generous storage for the isosurface output up front
    // to avoid reallocations as the vectors grow
    pv.reserve( 32768 );
    nv.reserve( 32768 );
    sv.reserve( 32768 );
    glutInit( &argc , argv );
    glutInitWindowPosition( 100 , 100 );
    glutInitWindowSize( 400 , 400 );
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH );
    glutCreateWindow( "mist::marching_cubes" );
    glutDisplayFunc( disp );
    glutIdleFunc( idle );
    glMatrixMode( GL_PROJECTION );
    glFrustum( -1 , 1 , -1 , 1 , 1 , 5 );
    gluLookAt( 0.0, 0.0, 4.0, 0.0, 0.0, -1.0, 0.0, 1.0, 0.0 );
    GLfloat lpos[ ] = { 0 , 0 , 4 , 1 };
    glLightfv( GL_LIGHT0 , GL_POSITION , lpos );
    glEnable( GL_LIGHTING );
    glEnable( GL_LIGHT0 );
    glEnable( GL_DEPTH_TEST );
    glEnable( GL_CULL_FACE );
    glCullFace( GL_BACK );
    glMatrixMode( GL_MODELVIEW );
    glutMainLoop( );
    return 0;
}