This article collects typical usage examples of the C++ method imagetype::Pointer::GetBufferedRegion. If you are wondering what Pointer::GetBufferedRegion does in C++, how to call it, or where to find examples of it in use, the hand-picked code samples below may help. You can also explore further usage examples of the containing class imagetype::Pointer.
The following shows 11 code examples of the Pointer::GetBufferedRegion method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
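Before the examples, here is a minimal, standalone sketch of the typical call pattern (the pixel type, dimension, and command-line file name are assumptions for illustration): GetBufferedRegion() returns the portion of the image actually held in memory, which is what you normally hand to ITK's region iterators.
#include <iostream>
#include "itkImage.h"
#include "itkImageFileReader.h"
#include "itkImageRegionConstIterator.h"
int main(int argc, char* argv[])
{
    typedef itk::Image<short, 3> ImageType;
    typedef itk::ImageFileReader<ImageType> ReaderType;
    ReaderType::Pointer reader = ReaderType::New();
    reader->SetFileName(argv[1]);   // input file supplied by the caller
    reader->Update();
    ImageType::Pointer image = reader->GetOutput();
    // The buffered region describes the pixels currently held in memory.
    ImageType::RegionType region = image->GetBufferedRegion();
    std::cout << "buffered size: " << region.GetSize() << std::endl;
    // Iterate only over the buffered region.
    itk::ImageRegionConstIterator<ImageType> it(image, region);
    long nonZero = 0;
    for (it.GoToBegin(); !it.IsAtEnd(); ++it) {
        if (it.Get() != 0) ++nonZero;
    }
    std::cout << "non-zero pixels: " << nonZero << std::endl;
    return 0;
}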
Example 1: LoadFiles
void LoadFiles(int argc, char* argv[]) {
itkcmds::itkImageIO<ImageType> io;
itkcmds::itkImageIO<LabelType> io2;
_src = io.ReadImageT(argv[1]);
_dst = io.ReadImageT(argv[2]);
_dstLabel = io2.ReadImageT(argv[3]);
_transformOut = argv[4];
_resampledOut = argv[5];
cout << "Transform Output: " << _transformOut << endl;
cout << "Resampled Output: " << _resampledOut << endl;
ImageType::SizeType szDst = _dst->GetBufferedRegion().GetSize();
itk::ContinuousIndex<double,3> szIdx;
for (int i = 0; i < 3; i++) {
szIdx[i] = szDst[i] / 2.0;
}
_dst->TransformContinuousIndexToPhysicalPoint(szIdx, _dstCenter);
itk::ImageRegionConstIteratorWithIndex<LabelType> labelIter(_dstLabel, _dstLabel->GetBufferedRegion());
for (labelIter.GoToBegin(); !labelIter.IsAtEnd(); ++labelIter) {
LabelType::PixelType label = labelIter.Get();
if (label > 0) {
_labelIndexes.push_back(labelIter.GetIndex());
}
}
_centerOfRotation.SetSize(ImageType::ImageDimension);
for (int i = 0; i < 3; i++) {
_centerOfRotation[i] = _dstCenter[i];
}
}
Example 2: U
FuzzyCMeans::FuzzyCMeans(QVector<ClusterPoint2D> &points, QVector<ClusterCentroid2D> &clusters, float fuzzy, imageType::Pointer myImage, int numCluster)
{
this->Eps = std::pow(10, -5);
this->isConverged=false;
this->Points = points;
this->Clusters = clusters;
this->myImageHeight = myImage->GetBufferedRegion().GetSize()[0];
this->myImageWidth = myImage->GetBufferedRegion().GetSize()[1];
this->myImage = myImage;
U= boost::numeric::ublas::matrix<double>(Points.size(),Clusters.size());
this->Fuzzyness = fuzzy;
double diff;
imageType::SizeType size;
size[0]=myImageWidth; // x axis
size[1]=myImageHeight; // y
imageType::RegionType region;
region.SetSize(size);
imageType::Pointer image = imageType::New();
image->SetRegions(region);
image->Allocate(); // image allocated
// Iterate through all points to create initial U matrix
for (int i = 0; i < Points.size(); i++)
{
ClusterPoint2D p = Points.at(i);
double sum = 0.0;
for (int j = 0; j < Clusters.size(); j++)
{
ClusterCentroid2D c = Clusters.at(j);
diff = std::sqrt(std::pow(CalculateEuclideanDistance(p, c), 2.0));
//U(i, j) = (diff == 0) ? Eps : diff;
if (diff==0){
U(i,j)=Eps;
}
else{
U(i,j)=diff;
}
sum += U(i, j);
}
}
this->RecalculateClusterMembershipValues();
}
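CalculateEuclideanDistance and RecalculateClusterMembershipValues are not shown in this example. As a hedged, standalone sketch (not the original implementation), the standard fuzzy c-means membership update for fuzziness m > 1 sets u_ij = 1 / sum_k (d_ij / d_ik)^(2/(m-1)); a minimal version could look like this:
#include <cmath>
#include <vector>
struct Point2D { double x, y; };
// Plain Euclidean distance between a point and a centroid.
static double euclideanDistance(const Point2D& p, const Point2D& c)
{
    return std::sqrt((p.x - c.x) * (p.x - c.x) + (p.y - c.y) * (p.y - c.y));
}
// U[i][j] becomes the membership of point i in cluster j.
static void recalculateMemberships(const std::vector<Point2D>& points,
                                   const std::vector<Point2D>& centroids,
                                   std::vector<std::vector<double> >& U,
                                   double fuzziness /* m > 1 */)
{
    const double power = 2.0 / (fuzziness - 1.0);
    for (std::size_t i = 0; i < points.size(); ++i) {
        for (std::size_t j = 0; j < centroids.size(); ++j) {
            double dij = euclideanDistance(points[i], centroids[j]);
            double sum = 0.0;
            for (std::size_t k = 0; k < centroids.size(); ++k) {
                double dik = euclideanDistance(points[i], centroids[k]);
                sum += std::pow(dij / (dik > 0.0 ? dik : 1e-12), power);
            }
            U[i][j] = 1.0 / (sum > 0.0 ? sum : 1e-12);
        }
    }
}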
Example 3: createPhantomParticles
static void createPhantomParticles(ImageType::Pointer edgeImg) {
PointVectorType phantoms;
itk::ImageRegionConstIteratorWithIndex<ImageType> iter(edgeImg, edgeImg->GetBufferedRegion());
for (iter.GoToBegin(); !iter.IsAtEnd(); ++iter) {
if (iter.Get() > 0) {
phantoms.push_back(iter.GetIndex()[0]);
phantoms.push_back(iter.GetIndex()[1]);
}
}
g_phantomParticles.push_back(phantoms);
}
Example 4: LoadFiles
void LoadFiles(int argc, char* argv[]) {
itkcmds::itkImageIO<ImageType> io;
itkcmds::itkImageIO<LabelType> io2;
_src = io.ReadImageT(argv[2]);
_dst = io.ReadImageT(argv[3]);
_dstLabel = io2.ReadImageT(argv[4]);
_transformOut = argv[5];
_resampledOut = argv[6];
cout << "Transform Output: " << _transformOut << endl;
cout << "Resampled Output: " << _resampledOut << endl;
ImageType::SizeType szDst = _dst->GetBufferedRegion().GetSize();
itk::ContinuousIndex<double,3> szIdx;
for (int i = 0; i < 3; i++) {
szIdx[i] = szDst[i] / 2.0;
}
_dst->TransformContinuousIndexToPhysicalPoint(szIdx, _dstCenter);
}
Example 5: loadMask
void loadMask(QString f) {
itkcmds::itkImageIO<ImageType> io;
ImageType::Pointer img = io.ReadImageT(f.toAscii().data());
g_boundaryMapList.push_back(img);
ScalarToRGBFilter::Pointer rgbFilter1 = ScalarToRGBFilter::New();
rgbFilter1->SetInput(img);
rgbFilter1->SetAlphaValue(128);
rgbFilter1->Update();
BitmapType::Pointer maskBitmap = rgbFilter1->GetOutput();
g_maskBitmapList.push_back(maskBitmap);
DistanceMapFilter::Pointer distmapFilter = DistanceMapFilter::New();
distmapFilter->SetInput(img);
distmapFilter->Update();
ImageType::Pointer distImg = distmapFilter->GetOutput();
g_distanceMapList.push_back(distmapFilter->GetOutput());
DistanceVectorImageType::Pointer distVector = distmapFilter->GetVectorDistanceMap();
g_distanceVectorList.push_back(distVector);
ScalarToRGBFilter::Pointer rgbFilter = ScalarToRGBFilter::New();
rgbFilter->SetInput(distImg);
rgbFilter->SetNumberOfThreads(8);
rgbFilter->Update();
BitmapType::Pointer rgbImage = rgbFilter->GetOutput();
g_distanceMapBitmapList.push_back(rgbImage);
EdgeDetectionFilterType::Pointer edgeFilter = EdgeDetectionFilterType::New();
edgeFilter->SetInput(img);
edgeFilter->Update();
ImageType::Pointer edgeImg = edgeFilter->GetOutput();
PointVectorType phantoms;
itk::ImageRegionConstIteratorWithIndex<ImageType> iter(edgeImg, edgeImg->GetBufferedRegion());
for (iter.GoToBegin(); !iter.IsAtEnd(); ++iter) {
if (iter.Get() > 0) {
phantoms.push_back(iter.GetIndex()[0]);
phantoms.push_back(iter.GetIndex()[1]);
}
}
g_phantomParticles.push_back(phantoms);
}
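The typedefs used in loadMask() (ImageType, BitmapType, ScalarToRGBFilter, DistanceMapFilter, DistanceVectorImageType, EdgeDetectionFilterType) and the g_* lists are defined elsewhere in the project. A hedged guess at how the ITK-based ones could be declared; ScalarToRGBFilter, with its SetAlphaValue(), looks like a project-specific filter and is only noted in a comment:
#include "itkImage.h"
#include "itkRGBAPixel.h"
#include "itkDanielssonDistanceMapImageFilter.h"
#include "itkSobelEdgeDetectionImageFilter.h"
// Pixel types are assumptions for illustration.
typedef itk::Image<float, 2> ImageType;
typedef itk::Image<itk::RGBAPixel<unsigned char>, 2> BitmapType;
// Danielsson's filter also provides GetVectorDistanceMap(), as used above.
typedef itk::DanielssonDistanceMapImageFilter<ImageType, ImageType> DistanceMapFilter;
typedef DistanceMapFilter::VectorImageType DistanceVectorImageType;
// A Sobel filter is one plausible choice for the edge detector.
typedef itk::SobelEdgeDetectionImageFilter<ImageType, ImageType> EdgeDetectionFilterType;
// ScalarToRGBFilter (with SetAlphaValue/SetNumberOfThreads) is assumed to be a
// custom scalar-to-RGBA conversion filter defined in the project itself.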
Example 6: readImage
virtual ReadResult readImage(const std::string& file, const osgDB::ReaderWriter::Options* options) const
{
std::string ext = osgDB::getLowerCaseFileExtension(file);
if (!acceptsExtension(ext)) return ReadResult::FILE_NOT_HANDLED;
std::string fileName = osgDB::findDataFile( file, options );
if (fileName.empty()) return ReadResult::FILE_NOT_FOUND;
notice()<<"Reading DICOM file "<<fileName<<std::endl;
typedef unsigned short PixelType;
const unsigned int Dimension = 3;
typedef itk::Image< PixelType, Dimension > ImageType;
typedef itk::ImageFileReader< ImageType > ReaderType;
ReaderType::Pointer reader = ReaderType::New();
reader->SetFileName( fileName.c_str() );
typedef itk::GDCMImageIO ImageIOType;
ImageIOType::Pointer gdcmImageIO = ImageIOType::New();
reader->SetImageIO( gdcmImageIO );
try
{
reader->Update();
}
catch (itk::ExceptionObject & e)
{
std::cerr << "exception in file reader " << std::endl;
std::cerr << e.GetDescription() << std::endl;
std::cerr << e.GetLocation() << std::endl;
return ReadResult::ERROR_IN_READING_FILE;
}
ImageType::Pointer inputImage = reader->GetOutput();
ImageType::RegionType region = inputImage->GetBufferedRegion();
ImageType::SizeType size = region.GetSize();
ImageType::IndexType start = region.GetIndex();
//inputImage->GetSpacing();
//inputImage->GetOrigin();
unsigned int width = size[0];
unsigned int height = size[1];
unsigned int depth = size[2];
osg::RefMatrix* matrix = new osg::RefMatrix;
notice()<<"width = "<<width<<" height = "<<height<<" depth = "<<depth<<std::endl;
for(unsigned int i=0; i<Dimension; ++i)
{
(*matrix)(i,i) = inputImage->GetSpacing()[i];
(*matrix)(3,i) = inputImage->GetOrigin()[i];
}
osg::Image* image = new osg::Image;
image->allocateImage(width, height, depth, GL_LUMINANCE, GL_UNSIGNED_BYTE, 1);
unsigned char* data = image->data();
typedef itk::ImageRegionConstIterator< ImageType > IteratorType;
IteratorType it(inputImage, region);
it.GoToBegin();
while (!it.IsAtEnd())
{
*data = it.Get();
++data;
++it;
}
image->setUserData(matrix);
matrix->preMult(osg::Matrix::scale(double(image->s()), double(image->t()), double(image->r())));
return image;
}
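Note that the copy loop above writes 16-bit unsigned short pixels straight into the 8-bit GL_UNSIGNED_BYTE buffer, silently truncating values above 255. A hedged, standalone variant (not part of the original plugin) would rescale the intensities first with itk::RescaleIntensityImageFilter and copy from the rescaled image instead:
#include "itkImage.h"
#include "itkRescaleIntensityImageFilter.h"
typedef itk::Image<unsigned short, 3> ImageType;
typedef itk::Image<unsigned char, 3>  ByteImageType;
typedef itk::RescaleIntensityImageFilter<ImageType, ByteImageType> RescaleType;
// Map the full intensity range of the DICOM volume into 0..255.
static ByteImageType::Pointer rescaleTo8Bit(ImageType::Pointer inputImage)
{
    RescaleType::Pointer rescaler = RescaleType::New();
    rescaler->SetInput(inputImage);
    rescaler->SetOutputMinimum(0);
    rescaler->SetOutputMaximum(255);
    rescaler->Update();
    return rescaler->GetOutput();
}
// The copy loop would then iterate over the rescaled image instead of inputImage.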
Example 7: main
//......... part of the code is omitted here .........
cout << "The Linear Mapping is done\n";
# if Curvature_Anistropic_Diffusion
{
cout << "The Curvature Diffusion is doing\n";
typedef itk::CurvatureAnisotropicDiffusionImageFilter<
ImageType, ImageType > MCD_FilterType;
MCD_FilterType::Pointer MCDFilter = MCD_FilterType::New();
//Initialization, using the paper's optimal parameters
const unsigned int numberOfIterations = 5;
const double timeStep = 0.0425;
const double conductance = 3;
MCDFilter->SetNumberOfIterations(numberOfIterations);
MCDFilter->SetTimeStep( timeStep );
MCDFilter->SetConductanceParameter( conductance );
MCDFilter->SetInput(inputImage);
try
{
MCDFilter->Update();
}
catch( itk::ExceptionObject & err )
{
cerr << "ExceptionObject caught!" << endl;
cerr << err << endl;
return EXIT_FAILURE;
}
inputImage=MCDFilter->GetOutput();
cout << "The Curvature Diffusion is done\n";
}
#endif
ImageType::RegionType region = inputImage->GetBufferedRegion();
ImageType::SizeType size = region.GetSize();
cout << "input image size: " << size << endl;
sizeX = size[0];
sizeY = size[1];
sizeZ = size[2];
itk::ImageRegionIterator< ImageType >
itr( inputImage, inputImage->GetBufferedRegion() );
itr.GoToBegin();
idx = 0;
volin = (DATATYPEIN*)malloc(sizeX*sizeY*(sizeZ+sizeExpand*2)*sizeof(DATATYPEIN));
while( ! itr.IsAtEnd() )
{
volin[idx] = itr.Get();
++itr;
++idx;
}
//allocate memory for the output image
volout = (DATATYPEOUT*)malloc(sizeX*sizeY*(sizeZ+sizeExpand*2)*sizeof(DATATYPEOUT));
// one pre-processing scheme
GraphcutResults = (unsigned short*)malloc(sizeX*sizeY*(sizeZ+sizeExpand*2)*sizeof(unsigned short));
Neuron_Binarization_3D(volin,GraphcutResults,sizeX,sizeY,sizeZ,0,1);
for (k=0; k<(sizeZ+sizeExpand*2); k++)
for (j=0; j<sizeY; j++)
for (i=0; i<sizeX; i++) {
volout[k *sizeX*sizeY + j *sizeX + i] = 0;
} //initial to zeros
std::cout << "Do you think we need the distance transform to make the centerline of image become bright with higher intensity?";
Example 8: RunRegistration
void RunRegistration() {
_transform = TransformType::New();
OptiReporter::Pointer optiReporter = OptiReporter::New();
Metric::Pointer metric = Metric::New();
metric->SetFixedImage(_dst);
bool useIndexes = false;
if (useIndexes) {
_centerOfRotation.SetSize(ImageType::ImageDimension);
for (int i = 0; i < ImageType::ImageDimension; i++) {
_centerOfRotation[i] = 0; // start the centroid accumulation at zero
}
itk::ImageRegionConstIteratorWithIndex<LabelType> labelIter(_dstLabel, _dstLabel->GetBufferedRegion());
int nPixels = 0;
for (labelIter.GoToBegin(); !labelIter.IsAtEnd(); ++labelIter) {
LabelType::PixelType label = labelIter.Get();
if (label > 0) {
_labelIndexes.push_back(labelIter.GetIndex());
for (int i = 0; i < ImageType::ImageDimension; i++) {
_centerOfRotation[i] += labelIter.GetIndex()[i];
}
nPixels ++;
}
}
for (int i = 0; i < ImageType::ImageDimension; i++) {
_centerOfRotation[i] /= nPixels;
}
metric->SetFixedImageIndexes(_labelIndexes);
_transform->SetFixedParameters(_centerOfRotation);
} else {
metric->SetFixedImageRegion(_dst->GetBufferedRegion());
metric->SetUseAllPixels(true);
_centerOfRotation.SetSize(ImageType::ImageDimension);
for (int i = 0; i < 3; i++) {
_centerOfRotation[i] = _dstCenter[i];
}
_transform->SetFixedParameters(_centerOfRotation);
}
cout << "Fixed Parameters: " << _centerOfRotation << endl;
metric->SetMovingImage(_src);
metric->SetInterpolator(Interpolator::New());
metric->SetTransform(_transform);
metric->Initialize();
Optimizer::Pointer opti = Optimizer::New();
opti->SetCostFunction(metric);
Optimizer::ScalesType scales;
scales.SetSize(TransformType::ParametersDimension);
scales.Fill(1);
if (_method == "affine") {
cout << "apply affine scaling ..." << endl;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
if (i == j) {
scales[3*i+j] = 160;
} else {
scales[3*i+j] = 30;
}
}
}
scales[9] = scales[10] = scales[11] = 0.1;
} else if (_method == "scale") {
scales[0] = scales[1] = scales[2] = 30;
scales[3] = scales[4] = scales[5] = .5;
scales[6] = scales[7] = scales[8] = 100;
} else if (_method == "similar") {
scales[0] = scales[1] = scales[2] = 10;
scales[3] = scales[4] = scales[5] = 0.5;
scales[6] = 100;
}
opti->SetScales(scales);
const int maxIters = 100;
#ifdef USE_CG_OPTIMIZER
opti->SetMaximumIteration(maxIters);
opti->SetMaximumLineIteration(10);
opti->SetUseUnitLengthGradient(true);
opti->SetStepLength(1);
opti->SetToFletchReeves();
#endif
#ifdef USE_GD_OPTIMIZER
opti->SetNumberOfIterations(maxIters);
opti->SetMinimumStepLength(1e-4);
opti->SetMaximumStepLength(3);
opti->SetRelaxationFactor(.5);
opti->SetGradientMagnitudeTolerance(1e-4);
#endif
opti->SetInitialPosition(_transform->GetParameters());
opti->AddObserver(itk::StartEvent(), optiReporter);
opti->AddObserver(itk::IterationEvent(), optiReporter);
opti->StartOptimization();
cout << "Current Cost: " << opti->GetValue() << endl;
_transformResult = opti->GetCurrentPosition();
_transform->SetParameters(opti->GetCurrentPosition());
}
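The resampled output (_resampledOut) is not produced in this function. A hedged sketch of how that step could look; ImageType, TransformType, _src, _dst, _transform and _resampledOut are taken from the surrounding example, the rest is an assumption:
#include "itkResampleImageFilter.h"
#include "itkLinearInterpolateImageFunction.h"
#include "itkImageFileWriter.h"
void ResampleMovingImage() {
    typedef itk::ResampleImageFilter<ImageType, ImageType> ResampleFilterType;
    typedef itk::LinearInterpolateImageFunction<ImageType, double> LinearInterpolatorType;
    ResampleFilterType::Pointer resampler = ResampleFilterType::New();
    resampler->SetInput(_src);             // moving image
    resampler->SetTransform(_transform);   // parameters found by RunRegistration()
    resampler->SetReferenceImage(_dst);    // fixed image defines the output grid
    resampler->UseReferenceImageOn();
    resampler->SetInterpolator(LinearInterpolatorType::New());
    resampler->SetDefaultPixelValue(0);
    resampler->Update();
    typedef itk::ImageFileWriter<ImageType> WriterType;
    WriterType::Pointer writer = WriterType::New();
    writer->SetFileName(_resampledOut);
    writer->SetInput(resampler->GetOutput());
    writer->Update();
}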
Example 9: internalGetSegmentationValues
//calculate segmentation values
bool CTImageTreeItem::internalGetSegmentationValues( SegmentationValues &values) const {
//get ITK image
ImageType::Pointer image = getITKImage();
if (image.IsNull())
return false;
//get buffered region of the image
ImageType::RegionType ctregion = image->GetBufferedRegion();
//define an iterator for the binary segment
typedef itk::ImageRegionConstIteratorWithIndex< BinaryImageType > BinaryIteratorType;
//get binary segment
BinaryImageTreeItem::ImageType::Pointer segment = values.m_segment->getITKImage();
if (segment.IsNull())
return false;
/* typedef itk::ImageFileWriter< ImageType > WriterType;
WriterType::Pointer writer = WriterType::New();
writer->SetFileName( "test.dcm" );
writer->SetInput( image );
try
{
writer->Update();
}
catch( itk::ExceptionObject & excep )
{
std::cerr << "Exception catched !" << std::endl;
std::cerr << excep << std::endl;
}
*/
//create a binary iterator for the segment and its buffered region
BinaryIteratorType binIter( segment, segment->GetBufferedRegion() );
ImageType::PointType point;
//The Boost Accumulators Framework is a framework for performing incremental statistical calculations
using namespace boost::accumulators;
//http://boost-sandbox.sourceforge.net/libs/accumulators/doc/html/accumulators/user_s_guide.html#accumulators.user_s_guide.the_accumulators_framework
accumulator_set<double,features<tag::count, tag::min, tag::mean, tag::max, tag::variance> > acc;
//check selected accuracy
if (values.m_accuracy == SegmentationValues::SimpleAccuracy) {
ImageType::IndexType index;
//iterate over the pixel of the binary segment
for(binIter.GoToBegin(); !binIter.IsAtEnd(); ++binIter) {
//if the current value is 255 (BinaryPixelOn)
if (binIter.Value() == BinaryPixelOn) {
//transforms the index to a physical point in the binary segment
segment->TransformIndexToPhysicalPoint(binIter.GetIndex(),point);
//transform that point to an index of the CT image
image->TransformPhysicalPointToIndex(point, index);
//check if that index is inside the CT region
if (ctregion.IsInside(index)) {
//get the pixel value at the index
int t = image->GetPixel(index);
//check if pixel value != -2048
if (isRealHUvalue(t)) {
//accumulate pixel value
acc( t );
}
}
}
}
//check selected accuracy
} else if (values.m_accuracy == SegmentationValues::PreventDoubleSamplingAccuracy) {
ImageType::IndexType index;
//definition for a set of indices, which can be compared
typedef std::set< ImageType::IndexType, IndexCompareFunctor > IndexSetType;
IndexSetType indexSet;
//iterate over the pixel of the binary segment
for(binIter.GoToBegin(); !binIter.IsAtEnd(); ++binIter) {
//if the current value is 255 (BinaryPixelOn)
if (binIter.Value() == BinaryPixelOn) {
//transforms the index to a physical point in the binary segment
segment->TransformIndexToPhysicalPoint(binIter.GetIndex(),point);
//transform that point to an index of the CT image
image->TransformPhysicalPointToIndex(point, index);
//check if that index is inside the CT region
if (ctregion.IsInside(index)) {
std::pair<IndexSetType::iterator,IndexSetType::iterator> ret;
//check whether this CT index has already been sampled
ret = indexSet.equal_range(index);
//If x does not match any key in the container, the range returned has a length of zero,
//with both iterators pointing to the nearest value greater than x, if any,
//or to set::end if x is greater than all the elements in the container.
if (ret.first == ret.second) {
indexSet.insert(ret.first, index);
//get the pixel value at the index
int t = image->GetPixel(index);
//check if pixel value != -2048
if (isRealHUvalue(t)) {
//accumulate pixel value
acc( t );
}
}
}
}
}
//......... part of the code is omitted here .........
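The truncated tail of this example presumably copies the accumulated statistics into `values`. As a hedged, standalone illustration of how the Boost accumulator declared above is read out (the sample values and printed names are placeholders, not the real SegmentationValues members):
#include <cmath>
#include <iostream>
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics/stats.hpp>
#include <boost/accumulators/statistics/count.hpp>
#include <boost/accumulators/statistics/min.hpp>
#include <boost/accumulators/statistics/max.hpp>
#include <boost/accumulators/statistics/mean.hpp>
#include <boost/accumulators/statistics/variance.hpp>
int main() {
    using namespace boost::accumulators;
    accumulator_set<double, features<tag::count, tag::min, tag::mean,
                                     tag::max, tag::variance> > acc;
    // accumulate a few sample HU values, as acc(t) does in the loop above
    acc(-50.0); acc(30.0); acc(120.0);
    std::cout << "count  = " << count(acc) << std::endl;
    std::cout << "min    = " << (min)(acc) << std::endl;  // parentheses guard against min/max macros
    std::cout << "mean   = " << mean(acc) << std::endl;
    std::cout << "max    = " << (max)(acc) << std::endl;
    std::cout << "stddev = " << std::sqrt(variance(acc)) << std::endl;
    return 0;
}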
Example 10: main
int main( int argc, char* argv[] )
{
if( argc != 3 )
{
std::cerr << "Usage: "<< std::endl;
std::cerr << argv[0];
std::cerr << " <InputFileName> n";
std::cerr << std::endl;
return EXIT_FAILURE;
}
int operations = atoi(argv[2]);
//sscanf(&operations,"%d",argv[2]);
//printf("%d\n", operations);
itk::TimeProbe itkClock;
double t0 = 0.0;
double tf = 0.0;
itk::MultiThreader::SetGlobalDefaultNumberOfThreads(1);
// Loading file
ReaderType::Pointer reader = ReaderType::New();
reader->SetFileName( argv[1] );
reader->Update();
ImageType::Pointer image = reader->GetOutput();
#ifdef GPU
GPUReaderType::Pointer gpureader = GPUReaderType::New();
gpureader->SetFileName( argv[1] );
gpureader->Update();
GPUImageType::Pointer gpuImage = gpureader->GetOutput();
#endif
saveFile((char*) "/tmp/itk_input.dcm", image);
// Allocate output image
ImageType::Pointer output = ImageType::New();
ImageType::RegionType region = image->GetBufferedRegion();
output->SetRegions( region );
output->SetOrigin( image->GetOrigin() );
output->SetSpacing( image->GetSpacing() );
output->Allocate();
// Negative
typedef itk::UnaryFunctorImageFilter<ImageType,ImageType,
Negate<ImageType::PixelType,ImageType::PixelType> > NegateImageFilterType;
NegateImageFilterType::Pointer negateFilter = NegateImageFilterType::New();
negateFilter = NegateImageFilterType::New();
negateFilter->SetInput(image);
#ifndef GPU_only
itkClock.Start();
TimerStart();
for(int n = 0; n < operations; n++)
{
negateFilter->Modified();
negateFilter->Update();
}
itkClock.Stop();
printf("Tempo gasto para fazer %d negative: %s\n",operations, getTimeElapsedInSeconds());
tf = itkClock.GetTotal();
std::cout << "My: " << (tf - t0) << std::endl;
t0 = tf;
#endif
// Saving Not result
saveFile((char*) "/tmp/itk_not.dcm", negateFilter->GetOutput());
#ifdef GPU
// GPU Negative
typedef itk::GPUUnaryFunctorImageFilter<ImageType,ImageType,
Negate<ImageType::PixelType,ImageType::PixelType> > GPUNegateImageFilterType;
GPUNegateImageFilterType::Pointer gpuNegateFilter = GPUNegateImageFilterType::New();
gpuNegateFilter->SetInput(gpureader->GetOutput());
gpuNegateFilter->Update();
// Saving Not result
//saveFile("/tmp/itk_gpu_not.dcm", gpuNegateFilter->GetOutput());
#endif
// Common Threshold
int lowerThreshold = 100;
int upperThreshold = 200;
// Threshold
typedef itk::BinaryThresholdImageFilter <ImageType, ImageType>
BinaryThresholdImageFilterType;
BinaryThresholdImageFilterType::Pointer thresholdFilter
= BinaryThresholdImageFilterType::New();
thresholdFilter = BinaryThresholdImageFilterType::New();
thresholdFilter->SetInput(reader->GetOutput());
//......... part of the code is omitted here .........
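The example is cut off right after the threshold filter is created. A hedged guess at how the configuration likely continues, mirroring the negative-filter benchmark above (the inside/outside values and output path are assumptions, not the omitted code):
thresholdFilter->SetLowerThreshold(lowerThreshold);
thresholdFilter->SetUpperThreshold(upperThreshold);
thresholdFilter->SetInsideValue(255);   // assumed: value for pixels inside [lower, upper]
thresholdFilter->SetOutsideValue(0);    // assumed: value for pixels outside the interval
for(int n = 0; n < operations; n++)
{
    thresholdFilter->Modified();
    thresholdFilter->Update();
}
// Saving Threshold result (path is an assumption)
saveFile((char*) "/tmp/itk_threshold.dcm", thresholdFilter->GetOutput());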
Example 11: getXiaoLiangOtsuThreshold
// This function is designed to compute the optimal threshold using the Otsu method;
// the algorithm was implemented by Xiao Liang based on ITK's Otsu algorithm
double VolumeProcess::getXiaoLiangOtsuThreshold(ImageType::Pointer img)
{
if(!img)
return 0;
double threshold = 0;
unsigned char m_min = 255;
unsigned char m_max = 0;
double m_mean = 0.0;
double m_variance = 0.0;
ImageType::RegionType region = img->GetBufferedRegion();
double numPix = region.GetSize(2)*region.GetSize(1)*region.GetSize(0);
//Get min, max, and mean:
itk::ImageRegionIterator< ImageType > itr( img, region );
for(itr.GoToBegin(); !itr.IsAtEnd(); ++itr)
{
double val = itr.Get();
if(val > m_max) m_max = val;
if(val < m_min) m_min = val;
m_mean += itr.Get();
}
m_mean = m_mean/numPix;
if(debug)
std::cerr << "Max = " << (int)m_max << ", Min = " << (int)m_min << std::endl;
//Do a sanity check
if ( m_min >= m_max)
{
threshold=m_min;
return threshold;
}
//Get the variance:
for(itr.GoToBegin(); !itr.IsAtEnd(); ++itr)
{
double val = (double)itr.Get();
m_variance += (val-m_mean)*(val-m_mean);
}
//These lines were not in Xiao Liang's version:
//m_variance = m_variance / numPix;
//m_variance = sqrt(m_variance);
threshold = m_mean - (m_variance/30);
// this step only initializes a good experimental value for m_Threshold, because the 3D image
// is sparse and contains many zero values;
//Create a histogram & init to zero
double relativeFrequency[m_NumberOfHistogramBins];
for ( unsigned char j = 0; j < m_NumberOfHistogramBins; j++ )
{
relativeFrequency[j] = 0.0;
}
double binMultiplier = (double)m_NumberOfHistogramBins/(double)(m_max-m_min);
if(debug)
std::cerr << "binMultiplier = " << binMultiplier << std::endl;
unsigned int binNumber;
for(itr.GoToBegin(); !itr.IsAtEnd(); ++itr)
{
double val = itr.Get();
if ( val == m_min )
{
binNumber = 0;
}
else
{
binNumber = (unsigned int)(((val-m_min)*binMultiplier) - 1);
if ( binNumber == m_NumberOfHistogramBins ) // in case of rounding errors
{
binNumber -= 1;
}
}
relativeFrequency[binNumber] += 1.0;
}
// normalize the frequencies
double totalMean = 0.0;
for ( unsigned char j = 0; j < m_NumberOfHistogramBins; j++ )
{
relativeFrequency[j] /= numPix;
totalMean += (j+1) * relativeFrequency[j];
}
// compute Otsu's threshold by maximizing the between-class variance
double freqLeft = relativeFrequency[0];
double meanLeft = 1.0;
double meanRight = ( totalMean - freqLeft ) / ( 1.0 - freqLeft );
double maxVarBetween = freqLeft * ( 1.0 - freqLeft ) * sqrt( meanLeft - meanRight );
int maxBinNumber = 0;
double freqLeftOld = freqLeft;
double meanLeftOld = meanLeft;
//......... part of the code is omitted here .........
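The search loop itself is cut off here. For reference, a hedged sketch of how the between-class variance maximization typically continues from the variables initialized above; the original (omitted) code may differ in details, such as its use of sqrt:
for (unsigned int j = 1; j < m_NumberOfHistogramBins; j++)
{
    // Move bin j from the right class to the left class and update the running means.
    double freqLeftNew = freqLeftOld + relativeFrequency[j];
    double meanLeftNew = (meanLeftOld * freqLeftOld + (j + 1) * relativeFrequency[j])
                         / (freqLeftNew > 0.0 ? freqLeftNew : 1.0);
    double meanRightNew = (totalMean - meanLeftNew * freqLeftNew)
                          / (freqLeftNew < 1.0 ? (1.0 - freqLeftNew) : 1.0);
    double varBetween = freqLeftNew * (1.0 - freqLeftNew)
                        * (meanLeftNew - meanRightNew) * (meanLeftNew - meanRightNew);
    if (varBetween > maxVarBetween)
    {
        maxVarBetween = varBetween;
        maxBinNumber = (int)j;
    }
    freqLeftOld = freqLeftNew;
    meanLeftOld = meanLeftNew;
}
// Map the winning bin back to an intensity value.
threshold = (double)m_min + (maxBinNumber + 1) / binMultiplier;
return threshold;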