This article collects and summarizes typical usage examples of the C++ method Image::Pointer::GetGeometry. If you are wondering what Image::Pointer::GetGeometry does exactly, how to call it, or in which situations it is used, the hand-picked method examples below may help. You can also explore further usage examples of the containing class Image::Pointer.
Below, 15 code examples of Pointer::GetGeometry are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
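Before the examples, here is a minimal sketch (not taken from the examples below) of the most common GetGeometry pattern, assuming "image" is an already loaded 3D mitk::Image::Pointer; depending on the MITK version, GetGeometry() returns a mitk::BaseGeometry* or a mitk::Geometry3D*:
mitk::BaseGeometry *geometry = image->GetGeometry();   // geometry of time step 0
mitk::Vector3D spacing = geometry->GetSpacing();       // voxel spacing in mm
mitk::Point3D origin = geometry->GetOrigin();          // world coordinates of the image origin
mitk::Point3D worldPoint;
worldPoint.Fill(0.0);
itk::Index<3> index;
geometry->WorldToIndex(worldPoint, index);             // map a world coordinate to a voxel index
bool inside = geometry->IsInside(worldPoint);          // is the point inside the image volume?
MITK_INFO << "spacing: " << spacing << " origin: " << origin << " inside: " << inside;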
Example 1: ComputeIntensityProfile
static IntensityProfile::Pointer ComputeIntensityProfile(Image::Pointer image, itk::PolyLineParametricPath<3>::Pointer path)
{
IntensityProfile::Pointer intensityProfile = IntensityProfile::New();
itk::PolyLineParametricPath<3>::InputType input = path->StartOfInput();
BaseGeometry* imageGeometry = image->GetGeometry();
const PixelType pixelType = image->GetPixelType();
IntensityProfile::MeasurementVectorType measurementVector;
itk::PolyLineParametricPath<3>::OffsetType offset;
Point3D worldPoint;
itk::Index<3> index;
do
{
imageGeometry->IndexToWorld(path->Evaluate(input), worldPoint);
imageGeometry->WorldToIndex(worldPoint, index);
mitkPixelTypeMultiplex3(ReadPixel, pixelType, image, index, measurementVector.GetDataPointer());
intensityProfile->PushBack(measurementVector);
offset = path->IncrementInput(input);
} while ((offset[0] | offset[1] | offset[2]) != 0);
return intensityProfile;
}
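A hedged usage sketch for the example above (the variable image and the vertex coordinates are hypothetical): the path vertices are given in index coordinates of the image, which is what the Evaluate/IndexToWorld/WorldToIndex round trip inside ComputeIntensityProfile expects.
itk::PolyLineParametricPath<3>::Pointer path = itk::PolyLineParametricPath<3>::New();
itk::PolyLineParametricPath<3>::ContinuousIndexType vertex;
vertex[0] = 10; vertex[1] = 10; vertex[2] = 5;   // hypothetical start voxel
path->AddVertex(vertex);
vertex[0] = 60; vertex[1] = 40; vertex[2] = 5;   // hypothetical end voxel
path->AddVertex(vertex);
IntensityProfile::Pointer profile = ComputeIntensityProfile(image, path);
for (IntensityProfile::InstanceIdentifier i = 0; i < profile->Size(); ++i)
  MITK_INFO << "sample " << i << ": " << profile->GetMeasurementVector(i)[0];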
Example 2: InsertOpenCVImageAsMitkTimeSlice
void OpenCVToMitkImageFilter::InsertOpenCVImageAsMitkTimeSlice(cv::Mat openCVImage, Image::Pointer mitkImage, int timeStep)
{
// convert it to an mitk::Image
this->SetOpenCVMat(openCVImage);
this->Modified();
this->Update();
//insert it as a timeSlice
mitkImage->GetGeometry(timeStep)->SetSpacing(this->GetOutput()->GetGeometry()->GetSpacing());
mitkImage->GetGeometry(timeStep)->SetOrigin(this->GetOutput()->GetGeometry()->GetOrigin());
mitkImage->GetGeometry(timeStep)->SetIndexToWorldTransform(this->GetOutput()->GetGeometry()->GetIndexToWorldTransform());
mitk::ImageReadAccessor readAccess(this->GetOutput());
mitkImage->SetImportVolume(readAccess.GetData(), timeStep);
mitkImage->Modified();
mitkImage->Update();
m_ImageMutex->Lock();
m_Image = mitkImage;
m_ImageMutex->Unlock();
}
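A hedged usage sketch for the example above, assuming filter is a configured OpenCVToMitkImageFilter::Pointer and mitkImage is a 4D mitk::Image whose geometry already provides at least timeStep + 1 time steps (file name and variables are hypothetical):
cv::Mat frame = cv::imread("frame_0001.png");   // hypothetical input frame
int timeStep = 0;
filter->InsertOpenCVImageAsMitkTimeSlice(frame, mitkImage, timeStep);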
Example 3:
void mitk::SurfaceInterpolationController::Interpolate()
{
if (m_CurrentNumberOfReducedContours < 2)
return;
//Setting up progress bar
/*
* Removed due to bug 12441. ProgressBar messes around with Qt event queue which is fatal for segmentation
*/
//mitk::ProgressBar::GetInstance()->AddStepsToDo(8);
m_InterpolateSurfaceFilter->Update();
Image::Pointer distanceImage = m_InterpolateSurfaceFilter->GetOutput();
vtkSmartPointer<vtkMarchingCubes> mcFilter = vtkSmartPointer<vtkMarchingCubes>::New();
mcFilter->SetInput(distanceImage->GetVtkImageData());
mcFilter->SetValue(0,0);
mcFilter->Update();
m_InterpolationResult = 0;
m_InterpolationResult = mitk::Surface::New();
m_InterpolationResult->SetVtkPolyData(mcFilter->GetOutput());
m_InterpolationResult->GetGeometry()->SetOrigin(distanceImage->GetGeometry()->GetOrigin());
vtkSmartPointer<vtkAppendPolyData> polyDataAppender = vtkSmartPointer<vtkAppendPolyData>::New();
for (unsigned int i = 0; i < m_ReduceFilter->GetNumberOfOutputs(); i++)
{
polyDataAppender->AddInput(m_ReduceFilter->GetOutput(i)->GetVtkPolyData());
}
polyDataAppender->Update();
m_Contours->SetVtkPolyData(polyDataAppender->GetOutput());
//Last progress step
/*
* Removed due to bug 12441. ProgressBar messes around with Qt event queue which is fatal for segmentation
*/
//mitk::ProgressBar::GetInstance()->Progress(8);
m_InterpolationResult->DisconnectPipeline();
}
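Note that the SetInput / AddInput calls in Example 3 follow the VTK 5 pipeline API. If this code were built against VTK 6 or newer, the same connections would presumably be made with the explicit data setters, roughly:
mcFilter->SetInputData(distanceImage->GetVtkImageData());                          // VTK >= 6 replacement for SetInput
polyDataAppender->AddInputData(m_ReduceFilter->GetOutput(i)->GetVtkPolyData());    // VTK >= 6 replacement for AddInput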
Example 4: Run
void QmitkCreatePolygonModelAction::Run(const QList<DataNode::Pointer> &selectedNodes)
{
DataNode::Pointer selectedNode = selectedNodes[0];
Image::Pointer image = dynamic_cast<mitk::Image *>(selectedNode->GetData());
if (image.IsNull())
{
return;
}
try
{
// Get preference properties for smoothing and decimation
IPreferencesService::Pointer prefService = Platform::GetServiceRegistry().GetServiceById<IPreferencesService>(IPreferencesService::ID);
IPreferences::Pointer segPref = prefService->GetSystemPreferences()->Node("/org.mitk.views.segmentation");
bool smoothingHint = segPref->GetBool("smoothing hint", true);
ScalarType smoothing = segPref->GetDouble("smoothing value", 1.0);
ScalarType decimation = segPref->GetDouble("decimation rate", 0.5);
if (smoothingHint)
{
smoothing = 0.0;
Vector3D spacing = image->GetGeometry()->GetSpacing();
for (Vector3D::Iterator iter = spacing.Begin(); iter != spacing.End(); ++iter)
smoothing = max(smoothing, *iter);
}
ShowSegmentationAsSurface::Pointer surfaceFilter = ShowSegmentationAsSurface::New();
// Activate callback functions
itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::Pointer successCommand = itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::New();
successCommand->SetCallbackFunction(this, &QmitkCreatePolygonModelAction::OnSurfaceCalculationDone);
surfaceFilter->AddObserver(ResultAvailable(), successCommand);
itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::Pointer errorCommand = itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::New();
errorCommand->SetCallbackFunction(this, &QmitkCreatePolygonModelAction::OnSurfaceCalculationDone);
surfaceFilter->AddObserver(ProcessingError(), errorCommand);
// set filter parameter
surfaceFilter->SetDataStorage(*m_DataStorage);
surfaceFilter->SetPointerParameter("Input", image);
surfaceFilter->SetPointerParameter("Group node", selectedNode);
surfaceFilter->SetParameter("Show result", true);
surfaceFilter->SetParameter("Sync visibility", false);
surfaceFilter->SetParameter("Median kernel size", 3u);
surfaceFilter->SetParameter("Decimate mesh", m_IsDecimated);
surfaceFilter->SetParameter("Decimation rate", (float) decimation);
if (m_IsSmoothed)
{
surfaceFilter->SetParameter("Apply median", true);
surfaceFilter->SetParameter("Smooth", true);
surfaceFilter->SetParameter("Gaussian SD", sqrtf(smoothing)); // use sqrt to account for setting of variance in preferences
StatusBar::GetInstance()->DisplayText("Smoothed surface creation started in background...");
}
else
{
surfaceFilter->SetParameter("Apply median", false);
surfaceFilter->SetParameter("Smooth", false);
StatusBar::GetInstance()->DisplayText("Surface creation started in background...");
}
surfaceFilter->StartAlgorithm();
}
catch(...)
{
MITK_ERROR << "Surface creation failed!";
}
}
Example 5: Run
void QmitkCreatePolygonModelAction::Run(const QList<DataNode::Pointer> &selectedNodes)
{
DataNode::Pointer selectedNode = selectedNodes[0];
Image::Pointer image = dynamic_cast<mitk::Image *>(selectedNode->GetData());
if (image.IsNull())
return;
try
{
if (!m_IsSmoothed)
{
ShowSegmentationAsSurface::Pointer surfaceFilter = ShowSegmentationAsSurface::New();
itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::Pointer successCommand = itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::New();
successCommand->SetCallbackFunction(this, &QmitkCreatePolygonModelAction::OnSurfaceCalculationDone);
surfaceFilter->AddObserver(ResultAvailable(), successCommand);
itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::Pointer errorCommand = itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::New();
errorCommand->SetCallbackFunction(this, &QmitkCreatePolygonModelAction::OnSurfaceCalculationDone);
surfaceFilter->AddObserver(ProcessingError(), errorCommand);
surfaceFilter->SetDataStorage(*m_DataStorage);
surfaceFilter->SetPointerParameter("Input", image);
surfaceFilter->SetPointerParameter("Group node", selectedNode);
surfaceFilter->SetParameter("Show result", true);
surfaceFilter->SetParameter("Sync visibility", false);
surfaceFilter->SetParameter("Smooth", false);
surfaceFilter->SetParameter("Apply median", false);
surfaceFilter->SetParameter("Median kernel size", 3u);
surfaceFilter->SetParameter("Gaussian SD", 1.5f);
surfaceFilter->SetParameter("Decimate mesh", m_IsDecimated);
surfaceFilter->SetParameter("Decimation rate", 0.8f);
StatusBar::GetInstance()->DisplayText("Surface creation started in background...");
surfaceFilter->StartAlgorithm();
}
else
{
ShowSegmentationAsSmoothedSurface::Pointer surfaceFilter = ShowSegmentationAsSmoothedSurface::New();
itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::Pointer successCommand = itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::New();
successCommand->SetCallbackFunction(this, &QmitkCreatePolygonModelAction::OnSurfaceCalculationDone);
surfaceFilter->AddObserver(mitk::ResultAvailable(), successCommand);
itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::Pointer errorCommand = itk::SimpleMemberCommand<QmitkCreatePolygonModelAction>::New();
errorCommand->SetCallbackFunction(this, &QmitkCreatePolygonModelAction::OnSurfaceCalculationDone);
surfaceFilter->AddObserver(mitk::ProcessingError(), errorCommand);
surfaceFilter->SetDataStorage(*m_DataStorage);
surfaceFilter->SetPointerParameter("Input", image);
surfaceFilter->SetPointerParameter("Group node", selectedNode);
berry::IWorkbenchPart::Pointer activePart =
berry::PlatformUI::GetWorkbench()->GetActiveWorkbenchWindow()->GetActivePage()->GetActivePart();
mitk::IRenderWindowPart* renderPart = dynamic_cast<mitk::IRenderWindowPart*>(activePart.GetPointer());
mitk::SliceNavigationController* timeNavController = 0;
if (renderPart != 0)
{
timeNavController = renderPart->GetRenderingManager()->GetTimeNavigationController();
}
int timeNr = timeNavController != 0 ? timeNavController->GetTime()->GetPos() : 0;
surfaceFilter->SetParameter("TimeNr", timeNr);
IPreferencesService::Pointer prefService = Platform::GetServiceRegistry().GetServiceById<IPreferencesService>(IPreferencesService::ID);
IPreferences::Pointer segPref = prefService->GetSystemPreferences()->Node("/org.mitk.views.segmentation");
bool smoothingHint = segPref->GetBool("smoothing hint", true);
float smoothing = (float)segPref->GetDouble("smoothing value", 1.0);
float decimation = (float)segPref->GetDouble("decimation rate", 0.5);
float closing = (float)segPref->GetDouble("closing ratio", 0.0);
if (smoothingHint)
{
smoothing = 0.0;
Vector3D spacing = image->GetGeometry()->GetSpacing();
for (Vector3D::Iterator iter = spacing.Begin(); iter != spacing.End(); ++iter)
smoothing = max(smoothing, *iter);
}
surfaceFilter->SetParameter("Smoothing", smoothing);
surfaceFilter->SetParameter("Decimation", decimation);
surfaceFilter->SetParameter("Closing", closing);
ProgressBar::GetInstance()->AddStepsToDo(8);
StatusBar::GetInstance()->DisplayText("Smoothed surface creation started in background...");
try {
surfaceFilter->StartAlgorithm();
} catch (...)
{
MITK_ERROR<<"Error creating smoothed polygon model: Not enough memory!";
}
}
}
catch(...)
{
//......... part of the code is omitted here .........
Example 6: planeGeometry
mitk::Image::Pointer mitk::SegTool2D::GetAffectedImageSliceAs2DImage(const PositionEvent* positionEvent, const Image* image)
{
if (!positionEvent) return NULL;
assert( positionEvent->GetSender() ); // sure, right?
unsigned int timeStep = positionEvent->GetSender()->GetTimeStep( image ); // get the timestep of the visible part (time-wise) of the image
// first, we determine which slice is affected
const PlaneGeometry* planeGeometry( dynamic_cast<const PlaneGeometry*> (positionEvent->GetSender()->GetCurrentWorldGeometry2D() ) );
if ( !image || !planeGeometry ) return NULL;
//Make sure that for reslicing and overwriting the same algorithm is used. We can specify the mode of the vtk reslicer
vtkSmartPointer<mitkVtkImageOverwrite> reslice = vtkSmartPointer<mitkVtkImageOverwrite>::New();
//set to false to extract a slice
reslice->SetOverwriteMode(false);
reslice->Modified();
//use ExtractSliceFilter with our specific vtkImageReslice for overwriting and extracting
mitk::ExtractSliceFilter::Pointer extractor = mitk::ExtractSliceFilter::New(reslice);
extractor->SetInput( image );
extractor->SetTimeStep( timeStep );
extractor->SetWorldGeometry( planeGeometry );
extractor->SetVtkOutputRequest(false);
extractor->SetResliceTransformByGeometry( image->GetTimeSlicedGeometry()->GetGeometry3D( timeStep ) );
extractor->Modified();
extractor->Update();
Image::Pointer slice = extractor->GetOutput();
/*============= BEGIN undo feature block ========================*/
//specify the undo operation with the non edited slice
m_undoOperation = new DiffSliceOperation(const_cast<mitk::Image*>(image), extractor->GetVtkOutput(), slice->GetGeometry(), timeStep, const_cast<mitk::PlaneGeometry*>(planeGeometry));
/*============= END undo feature block ========================*/
return slice;
}
Example 7: DiffSliceOperation
mitk::Image::Pointer mitk::SegTool2D::GetAffectedImageSliceAs2DImage(const PlaneGeometry* planeGeometry, const Image* image, unsigned int timeStep)
{
if ( !image || !planeGeometry ) return NULL;
//Make sure that for reslicing and overwriting the same algorithm is used. We can specify the mode of the vtk reslicer
vtkSmartPointer<mitkVtkImageOverwrite> reslice = vtkSmartPointer<mitkVtkImageOverwrite>::New();
//set to false to extract a slice
reslice->SetOverwriteMode(false);
reslice->Modified();
//use ExtractSliceFilter with our specific vtkImageReslice for overwriting and extracting
mitk::ExtractSliceFilter::Pointer extractor = mitk::ExtractSliceFilter::New(reslice);
extractor->SetInput( image );
extractor->SetTimeStep( timeStep );
extractor->SetWorldGeometry( planeGeometry );
extractor->SetVtkOutputRequest(false);
extractor->SetResliceTransformByGeometry( image->GetTimeGeometry()->GetGeometryForTimeStep( timeStep ) );
extractor->Modified();
extractor->Update();
Image::Pointer slice = extractor->GetOutput();
/*============= BEGIN undo feature block ========================*/
//specify the undo operation with the non edited slice
m_undoOperation = new DiffSliceOperation(const_cast<mitk::Image*>(image), extractor->GetVtkOutput(), slice->GetGeometry(), timeStep, const_cast<mitk::PlaneGeometry*>(planeGeometry));
/*============= END undo feature block ========================*/
return slice;
}
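Examples 6 and 7 differ mainly in how the geometry of the requested time step is obtained: Example 6 uses the older GetTimeSlicedGeometry()->GetGeometry3D(timeStep) interface, while Example 7 uses the newer time-geometry API. A hedged sketch of the newer call in isolation:
// newer MITK versions: one BaseGeometry per time step, retrieved via the TimeGeometry
mitk::BaseGeometry::Pointer geometryAtTimeStep =
    image->GetTimeGeometry()->GetGeometryForTimeStep(timeStep);
extractor->SetResliceTransformByGeometry(geometryAtTimeStep);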
Example 8: ThreadedUpdateFunction
bool ShowSegmentationAsSmoothedSurface::ThreadedUpdateFunction()
{
Image::Pointer image;
GetPointerParameter("Input", image);
float smoothing;
GetParameter("Smoothing", smoothing);
float decimation;
GetParameter("Decimation", decimation);
float closing;
GetParameter("Closing", closing);
int timeNr = 0;
GetParameter("TimeNr", timeNr);
if (image->GetDimension() == 4)
MITK_INFO << "CREATING SMOOTHED POLYGON MODEL (t = " << timeNr << ')';
else
MITK_INFO << "CREATING SMOOTHED POLYGON MODEL";
MITK_INFO << " Smoothing = " << smoothing;
MITK_INFO << " Decimation = " << decimation;
MITK_INFO << " Closing = " << closing;
Geometry3D::Pointer geometry = dynamic_cast<Geometry3D *>(image->GetGeometry()->Clone().GetPointer());
// Make ITK image out of MITK image
typedef itk::Image<unsigned char, 3> CharImageType;
typedef itk::Image<unsigned short, 3> ShortImageType;
typedef itk::Image<float, 3> FloatImageType;
if (image->GetDimension() == 4)
{
ImageTimeSelector::Pointer imageTimeSelector = ImageTimeSelector::New();
imageTimeSelector->SetInput(image);
imageTimeSelector->SetTimeNr(timeNr);
imageTimeSelector->UpdateLargestPossibleRegion();
image = imageTimeSelector->GetOutput(0);
}
ImageToItk<CharImageType>::Pointer imageToItkFilter = ImageToItk<CharImageType>::New();
try
{
imageToItkFilter->SetInput(image);
}
catch (const itk::ExceptionObject &e)
{
// Most probably the input image type is wrong. Binary images are expected to be
// >unsigned< char images.
MITK_ERROR << e.GetDescription() << endl;
return false;
}
imageToItkFilter->Update();
CharImageType::Pointer itkImage = imageToItkFilter->GetOutput();
// Get bounding box and relabel
MITK_INFO << "Extracting VOI...";
int imageLabel = 1;
bool roiFound = false;
CharImageType::IndexType minIndex;
minIndex.Fill(numeric_limits<CharImageType::IndexValueType>::max());
CharImageType::IndexType maxIndex;
maxIndex.Fill(numeric_limits<CharImageType::IndexValueType>::min());
itk::ImageRegionIteratorWithIndex<CharImageType> iter(itkImage, itkImage->GetLargestPossibleRegion());
for (iter.GoToBegin(); !iter.IsAtEnd(); ++iter)
{
if (iter.Get() == imageLabel)
{
roiFound = true;
iter.Set(1);
CharImageType::IndexType currentIndex = iter.GetIndex();
for (unsigned int dim = 0; dim < 3; ++dim)
{
minIndex[dim] = min(currentIndex[dim], minIndex[dim]);
maxIndex[dim] = max(currentIndex[dim], maxIndex[dim]);
}
}
else
{
iter.Set(0);
}
}
if (!roiFound)
{
ProgressBar::GetInstance()->Progress(8);
//......... part of the code is omitted here .........
Example 9: sliceIterator
//......... part of the code is omitted here .........
extentInMM[1] = m_CurrentWorldPlaneGeometry->GetExtentInMM(1);
// The maximum extent is the length of the diagonal of the considered plane
double maxExtent = sqrt(extentInMM[0] * extentInMM[0] + extentInMM[1] * extentInMM[1]);
unsigned int xTranlation = (maxExtent - extentInMM[0]);
unsigned int yTranlation = (maxExtent - extentInMM[1]);
size[0] = (maxExtent + xTranlation) / newPixelSpacing[0];
size[1] = (maxExtent + yTranlation) / newPixelSpacing[1];
// Creating an ImageRegion Object
typename SliceImageType::RegionType region;
region.SetSize(size);
region.SetIndex(start);
// Defining the image's extent and origin by passing the region to it and allocating memory for it
resultSlice->SetRegions(region);
resultSlice->SetSpacing(pixelSpacing);
resultSlice->Allocate();
/*
* Here we create a new geometry so that the transformations are calculated correctly (our resulting slice has a
* different bounding box and spacing).
* The original current world geometry must be cloned because we have to keep the directions of the axis vectors which
* represent the rotation.
*/
right.Normalize();
bottom.Normalize();
// Here we translate the origin to adapt the new geometry to the previous calculated extent
origin[0] -= xTranlation * right[0] + yTranlation * bottom[0];
origin[1] -= xTranlation * right[1] + yTranlation * bottom[1];
origin[2] -= xTranlation * right[2] + yTranlation * bottom[2];
// Putting it together for the new geometry
mitk::BaseGeometry::Pointer newSliceGeometryTest =
dynamic_cast<BaseGeometry *>(m_CurrentWorldPlaneGeometry->Clone().GetPointer());
newSliceGeometryTest->ChangeImageGeometryConsideringOriginOffset(true);
// Workaround because of BUG (#6505)
newSliceGeometryTest->GetIndexToWorldTransform()->SetMatrix(
m_CurrentWorldPlaneGeometry->GetIndexToWorldTransform()->GetMatrix());
// Workaround end
newSliceGeometryTest->SetOrigin(origin);
ScalarType bounds[6] = {0, static_cast<ScalarType>(size[0]), 0, static_cast<ScalarType>(size[1]), 0, 1};
newSliceGeometryTest->SetBounds(bounds);
newSliceGeometryTest->SetSpacing(newPixelSpacing);
newSliceGeometryTest->Modified();
// Workaround because of BUG (#6505)
itk::MatrixOffsetTransformBase<mitk::ScalarType, 3, 3>::MatrixType tempTransform =
newSliceGeometryTest->GetIndexToWorldTransform()->GetMatrix();
// Workaround end
/*
* Now we iterate over the recently created slice.
* For each slice pixel we check whether there is a corresponding
* pixel in the input image which can be set in the slice.
* In this way a slice is sampled out of the input image according to the given PlaneGeometry.
*/
Point3D currentSliceIndexPointIn2D;
Point3D currentImageWorldPointIn3D;
typename InputImageType::IndexType inputIndex;
SliceIterator sliceIterator(resultSlice, resultSlice->GetLargestPossibleRegion());
sliceIterator.GoToBegin();
while (!sliceIterator.IsAtEnd())
{
/*
* Here we add 0.5 to ensure that the indices are correctly transformed
* (because of the 0.5 offset bug).
*/
currentSliceIndexPointIn2D[0] = sliceIterator.GetIndex()[0] + 0.5;
currentSliceIndexPointIn2D[1] = sliceIterator.GetIndex()[1] + 0.5;
currentSliceIndexPointIn2D[2] = 0;
newSliceGeometryTest->IndexToWorld(currentSliceIndexPointIn2D, currentImageWorldPointIn3D);
m_ImageGeometry->WorldToIndex(currentImageWorldPointIn3D, inputIndex);
if (m_ImageGeometry->IsIndexInside(inputIndex))
{
resultSlice->SetPixel(sliceIterator.GetIndex(), inputImage->GetPixel(inputIndex));
}
else
{
resultSlice->SetPixel(sliceIterator.GetIndex(), 0);
}
++sliceIterator;
}
Image::Pointer resultImage = ImageToImageFilter::GetOutput();
GrabItkImageMemory(resultSlice, resultImage, nullptr, false);
resultImage->SetClonedGeometry(newSliceGeometryTest);
// Workaround because of BUG (#6505)
resultImage->GetGeometry()->GetIndexToWorldTransform()->SetMatrix(tempTransform);
// Workaround end
}
Example 10: FeatureDescriptionPrefix
mitk::GIFVolumetricStatistics::FeatureListType mitk::GIFVolumetricStatistics::CalculateFeatures(const Image::Pointer & image, const Image::Pointer &mask)
{
FeatureListType featureList;
if (image->GetDimension() < 3)
{
return featureList;
}
AccessByItk_3(image, CalculateVolumeStatistic, mask, featureList, FeatureDescriptionPrefix());
AccessByItk_3(mask, CalculateLargestDiameter, image, featureList, FeatureDescriptionPrefix());
vtkSmartPointer<vtkImageMarchingCubes> mesher = vtkSmartPointer<vtkImageMarchingCubes>::New();
vtkSmartPointer<vtkMassProperties> stats = vtkSmartPointer<vtkMassProperties>::New();
mesher->SetInputData(mask->GetVtkImageData());
mesher->SetValue(0, 0.5);
stats->SetInputConnection(mesher->GetOutputPort());
stats->Update();
double pi = vnl_math::pi;
double meshVolume = stats->GetVolume();
double meshSurf = stats->GetSurfaceArea();
double pixelVolume = featureList[1].second;
double pixelSurface = featureList[3].second;
MITK_INFO << "Surface: " << pixelSurface << " Volume: " << pixelVolume;
double compactness1 = pixelVolume / (std::sqrt(pi) * std::pow(meshSurf, 2.0 / 3.0));
double compactness1Pixel = pixelVolume / (std::sqrt(pi) * std::pow(pixelSurface, 2.0 / 3.0));
//This is the definition used by Aertz. However, due to the 2/3 exponent this feature is not dimensionless. Use compactness3 instead.
double compactness2 = 36 * pi*pixelVolume*pixelVolume / meshSurf / meshSurf / meshSurf;
double compactness2MeshMesh = 36 * pi*meshVolume*meshVolume / meshSurf / meshSurf / meshSurf;
double compactness2Pixel = 36 * pi*pixelVolume*pixelVolume / pixelSurface / pixelSurface / pixelSurface;
double compactness3 = pixelVolume / (std::sqrt(pi) * std::pow(meshSurf, 3.0 / 2.0));
double compactness3MeshMesh = meshVolume / (std::sqrt(pi) * std::pow(meshSurf, 3.0 / 2.0));
double compactness3Pixel = pixelVolume / (std::sqrt(pi) * std::pow(pixelSurface, 3.0 / 2.0));
double sphericity = std::pow(pi, 1 / 3.0) *std::pow(6 * pixelVolume, 2.0 / 3.0) / meshSurf;
double sphericityMesh = std::pow(pi, 1 / 3.0) *std::pow(6 * meshVolume, 2.0 / 3.0) / meshSurf;
double sphericityPixel = std::pow(pi, 1 / 3.0) *std::pow(6 * pixelVolume, 2.0 / 3.0) / pixelSurface;
double surfaceToVolume = meshSurf / meshVolume;
double surfaceToVolumePixel = pixelSurface / pixelVolume;
double sphericalDisproportion = meshSurf / 4 / pi / std::pow(3.0 / 4.0 / pi * pixelVolume, 2.0 / 3.0);
double sphericalDisproportionMesh = meshSurf / 4 / pi / std::pow(3.0 / 4.0 / pi * meshVolume, 2.0 / 3.0);
double sphericalDisproportionPixel = pixelSurface / 4 / pi / std::pow(3.0 / 4.0 / pi * pixelVolume, 2.0 / 3.0);
double asphericity = std::pow(1.0/compactness2, (1.0 / 3.0)) - 1;
double asphericityMesh = std::pow(1.0 / compactness2MeshMesh, (1.0 / 3.0)) - 1;
double asphericityPixel = std::pow(1.0/compactness2Pixel, (1.0 / 3.0)) - 1;
//Calculate center of mass shift
int xx = mask->GetDimensions()[0];
int yy = mask->GetDimensions()[1];
int zz = mask->GetDimensions()[2];
double xd = mask->GetGeometry()->GetSpacing()[0];
double yd = mask->GetGeometry()->GetSpacing()[1];
double zd = mask->GetGeometry()->GetSpacing()[2];
vtkSmartPointer<vtkDoubleArray> dataset1Arr = vtkSmartPointer<vtkDoubleArray>::New();
vtkSmartPointer<vtkDoubleArray> dataset2Arr = vtkSmartPointer<vtkDoubleArray>::New();
vtkSmartPointer<vtkDoubleArray> dataset3Arr = vtkSmartPointer<vtkDoubleArray>::New();
dataset1Arr->SetNumberOfComponents(1);
dataset2Arr->SetNumberOfComponents(1);
dataset3Arr->SetNumberOfComponents(1);
dataset1Arr->SetName("M1");
dataset2Arr->SetName("M2");
dataset3Arr->SetName("M3");
vtkSmartPointer<vtkDoubleArray> dataset1ArrU = vtkSmartPointer<vtkDoubleArray>::New();
vtkSmartPointer<vtkDoubleArray> dataset2ArrU = vtkSmartPointer<vtkDoubleArray>::New();
vtkSmartPointer<vtkDoubleArray> dataset3ArrU = vtkSmartPointer<vtkDoubleArray>::New();
dataset1ArrU->SetNumberOfComponents(1);
dataset2ArrU->SetNumberOfComponents(1);
dataset3ArrU->SetNumberOfComponents(1);
dataset1ArrU->SetName("M1");
dataset2ArrU->SetName("M2");
dataset3ArrU->SetName("M3");
for (int x = 0; x < xx; x++)
{
for (int y = 0; y < yy; y++)
{
for (int z = 0; z < zz; z++)
{
itk::Image<int,3>::IndexType index;
index[0] = x;
index[1] = y;
index[2] = z;
mitk::ScalarType pxImage;
mitk::ScalarType pxMask;
mitkPixelTypeMultiplex5(
mitk::FastSinglePixelAccess,
image->GetChannelDescriptor().GetPixelType(),
image,
image->GetVolumeData(),
//......... part of the code is omitted here .........
Example 11: GenerateDataFromDwi
void QmitkOdfMaximaExtractionView::GenerateDataFromDwi()
{
typedef itk::OdfMaximaExtractionFilter< float > MaximaExtractionFilterType;
MaximaExtractionFilterType::Pointer filter = MaximaExtractionFilterType::New();
mitk::Geometry3D::Pointer geometry;
if (!m_ImageNodes.empty())
{
try{
Image::Pointer img = dynamic_cast<Image*>(m_ImageNodes.at(0)->GetData());
typedef ImageToItk< MaximaExtractionFilterType::CoefficientImageType > CasterType;
CasterType::Pointer caster = CasterType::New();
caster->SetInput(img);
caster->Update();
filter->SetShCoeffImage(caster->GetOutput());
geometry = img->GetGeometry();
}
catch(itk::ExceptionObject &e)
{
MITK_INFO << "wrong image type: " << e.what();
return;
}
}
else
return;
filter->SetMaxNumPeaks(m_Controls->m_MaxNumPeaksBox->value());
filter->SetPeakThreshold(m_Controls->m_PeakThresholdBox->value());
if (!m_BinaryImageNodes.empty())
{
ItkUcharImgType::Pointer itkMaskImage = ItkUcharImgType::New();
Image::Pointer mitkMaskImg = dynamic_cast<Image*>(m_BinaryImageNodes.at(0)->GetData());
CastToItkImage<ItkUcharImgType>(mitkMaskImg, itkMaskImage);
filter->SetMaskImage(itkMaskImage);
}
switch (m_Controls->m_NormalizationBox->currentIndex())
{
case 0:
filter->SetNormalizationMethod(MaximaExtractionFilterType::NO_NORM);
break;
case 1:
filter->SetNormalizationMethod(MaximaExtractionFilterType::MAX_VEC_NORM);
break;
case 2:
filter->SetNormalizationMethod(MaximaExtractionFilterType::SINGLE_VEC_NORM);
break;
}
filter->GenerateData();
ItkUcharImgType::Pointer numDirImage = filter->GetNumDirectionsImage();
if (m_Controls->m_OutputDirectionImagesBox->isChecked())
{
typedef MaximaExtractionFilterType::ItkDirectionImageContainer ItkDirectionImageContainer;
ItkDirectionImageContainer::Pointer container = filter->GetDirectionImageContainer();
for (int i=0; i<container->Size(); i++)
{
MaximaExtractionFilterType::ItkDirectionImage::Pointer itkImg = container->GetElement(i);
mitk::Image::Pointer img = mitk::Image::New();
img->InitializeByItk( itkImg.GetPointer() );
img->SetVolume( itkImg->GetBufferPointer() );
DataNode::Pointer node = DataNode::New();
node->SetData(img);
QString name(m_ImageNodes.at(0)->GetName().c_str());
name += "_Direction";
name += QString::number(i+1);
node->SetName(name.toStdString().c_str());
GetDataStorage()->Add(node);
}
}
if (m_Controls->m_OutputNumDirectionsBox->isChecked())
{
mitk::Image::Pointer image2 = mitk::Image::New();
image2->InitializeByItk( numDirImage.GetPointer() );
image2->SetVolume( numDirImage->GetBufferPointer() );
DataNode::Pointer node = DataNode::New();
node->SetData(image2);
QString name(m_ImageNodes.at(0)->GetName().c_str());
name += "_NumDirections";
node->SetName(name.toStdString().c_str());
GetDataStorage()->Add(node);
}
if (m_Controls->m_OutputVectorFieldBox->isChecked())
{
mitk::Vector3D outImageSpacing = geometry->GetSpacing();
float minSpacing = 1;
if(outImageSpacing[0]<outImageSpacing[1] && outImageSpacing[0]<outImageSpacing[2])
minSpacing = outImageSpacing[0];
else if (outImageSpacing[1] < outImageSpacing[2])
minSpacing = outImageSpacing[1];
else
minSpacing = outImageSpacing[2];
mitk::FiberBundleX::Pointer directions = filter->GetOutputFiberBundle();
directions->SetGeometry(geometry);
//......... part of the code is omitted here .........
Example 12: main
/*!
\brief Copies transformation matrix of one image to another
*/
int main(int argc, char* argv[])
{
mitkCommandLineParser parser;
parser.setTitle("Copy Geometry");
parser.setCategory("Preprocessing Tools");
parser.setDescription("Copies transformation matrix of one image to another");
parser.setContributor("MIC");
parser.setArgumentPrefix("--", "-");
parser.addArgument("in", "i", mitkCommandLineParser::InputFile, "Input:", "input image", us::Any(), false);
parser.addArgument("ref", "r", mitkCommandLineParser::InputFile, "Reference:", "reference image", us::Any(), false);
parser.addArgument("alignCentroid", "a", mitkCommandLineParser::Bool, "align centroids", "align centroids", us::Any(), true);
parser.addArgument("out", "o", mitkCommandLineParser::OutputFile, "Output:", "output image", us::Any(), false);
map<string, us::Any> parsedArgs = parser.parseArguments(argc, argv);
if (parsedArgs.size()==0)
return EXIT_FAILURE;
// mandatory arguments
string imageName = us::any_cast<string>(parsedArgs["in"]);
string refImage = us::any_cast<string>(parsedArgs["ref"]);
string outImage = us::any_cast<string>(parsedArgs["out"]);
bool originOnly = false;
// Optional flag: only translate the origin so that the centroids are aligned
if ( parsedArgs.count("alignCentroid") || parsedArgs.count("a"))
{
originOnly = true;
}
try
{
Image::Pointer source = dynamic_cast<mitk::Image*>(mitk::IOUtil::Load(refImage)[0].GetPointer());
Image::Pointer target = dynamic_cast<mitk::Image*>(mitk::IOUtil::Load(imageName)[0].GetPointer());
if (originOnly)
{
// Calculate correction to align centroids
double c[3];
c[0] = source->GetGeometry()->GetOrigin()[0]
+ source->GetGeometry()->GetExtent(0)/2.0
- target->GetGeometry()->GetOrigin()[0]
- target->GetGeometry()->GetExtent(0)/2.0;
c[1] = source->GetGeometry()->GetOrigin()[1]
+ source->GetGeometry()->GetExtent(1)/2.0
- target->GetGeometry()->GetOrigin()[1]
- target->GetGeometry()->GetExtent(1)/2.0;
c[2] = source->GetGeometry()->GetOrigin()[2]
+ source->GetGeometry()->GetExtent(2)/2.0
- target->GetGeometry()->GetOrigin()[2]
- target->GetGeometry()->GetExtent(2)/2.0;
double newOrigin[3];
newOrigin[0] = target->GetGeometry()->GetOrigin()[0] +c[0];
newOrigin[1] = target->GetGeometry()->GetOrigin()[1] +c[1];
newOrigin[2] = target->GetGeometry()->GetOrigin()[2] +c[2];
target->GetGeometry()->SetOrigin(newOrigin);
}
else
{
mitk::BaseGeometry* s_geom = source->GetGeometry();
mitk::BaseGeometry* t_geom = target->GetGeometry();
t_geom->SetIndexToWorldTransform(s_geom->GetIndexToWorldTransform());
target->SetGeometry(t_geom);
}
mitk::IOUtil::Save(target, outImage);
}
catch (const itk::ExceptionObject &e)
{
std::cout << e;
return EXIT_FAILURE;
}
catch (const std::exception &e)
{
std::cout << e.what();
return EXIT_FAILURE;
}
catch (...)
{
std::cout << "ERROR!?!";
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
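A typical invocation of this mini-app might look as follows (the executable name MitkCopyGeometry is an assumption; the actual name depends on how the command line app is registered in the build):
MitkCopyGeometry --in moving.nrrd --ref fixed.nrrd --out moving_with_ref_geometry.nrrd
MitkCopyGeometry --in moving.nrrd --ref fixed.nrrd --out moving_centroid_aligned.nrrd --alignCentroid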
Example 13: image(
//......... part of the code is omitted here .........
DistanceImageType::IndexType currentIndex;
currentIndex[0] = ( currentPoint[0]-xmin ) / m_DistanceImageSpacing;
currentIndex[1] = ( currentPoint[1]-ymin ) / m_DistanceImageSpacing;
currentIndex[2] = ( currentPoint[2]-zmin ) / m_DistanceImageSpacing;
narrowbandPoints.push(currentIndex);
distanceImg->SetPixel(currentIndex, distance);
NeighborhoodImageIterator::RadiusType radius;
radius.Fill(1);
NeighborhoodImageIterator nIt(radius, distanceImg, distanceImg->GetLargestPossibleRegion());
unsigned int relativeNbIdx[] = {4, 10, 12, 14, 16, 22};
bool isInBounds = false;
while ( !narrowbandPoints.empty() )
{
nIt.SetLocation(narrowbandPoints.front());
narrowbandPoints.pop();
for (int i = 0; i < 6; i++)
{
nIt.GetPixel(relativeNbIdx[i], isInBounds);
if( isInBounds && nIt.GetPixel(relativeNbIdx[i]) == 10)
{
currentIndex = nIt.GetIndex(relativeNbIdx[i]);
currentPoint[0] = currentIndex[0]*m_DistanceImageSpacing + xmin;
currentPoint[1] = currentIndex[1]*m_DistanceImageSpacing + ymin;
currentPoint[2] = currentIndex[2]*m_DistanceImageSpacing + zmin;
distance = this->CalculateDistanceValue(currentPoint);
if ( abs(distance) <= m_DistanceImageSpacing*2 )
{
nIt.SetPixel(relativeNbIdx[i], distance);
narrowbandPoints.push(currentIndex);
}
}
}
}
ImageIterator imgRegionIterator (distanceImg, distanceImg->GetLargestPossibleRegion());
imgRegionIterator.GoToBegin();
double prevPixelVal = 1;
unsigned int _size[3] = { (unsigned int)(size[0] - 1), (unsigned int)(size[1] - 1), (unsigned int)(size[2] - 1) };
//Set every pixel inside the surface to -10 except the edge points (so that the resulting surface is closed)
while (!imgRegionIterator.IsAtEnd()) {
if ( imgRegionIterator.Get() == 10 && prevPixelVal < 0 )
{
while (imgRegionIterator.Get() == 10)
{
if (imgRegionIterator.GetIndex()[0] == _size[0] || imgRegionIterator.GetIndex()[1] == _size[1] || imgRegionIterator.GetIndex()[2] == _size[2]
|| imgRegionIterator.GetIndex()[0] == 0U || imgRegionIterator.GetIndex()[1] == 0U || imgRegionIterator.GetIndex()[2] == 0U )
{
imgRegionIterator.Set(10);
prevPixelVal = 10;
++imgRegionIterator;
break;
}
else
{
imgRegionIterator.Set(-10);
++imgRegionIterator;
prevPixelVal = -10;
}
}
}
else if (imgRegionIterator.GetIndex()[0] == _size[0] || imgRegionIterator.GetIndex()[1] == _size[1] || imgRegionIterator.GetIndex()[2] == _size[2]
|| imgRegionIterator.GetIndex()[0] == 0U || imgRegionIterator.GetIndex()[1] == 0U || imgRegionIterator.GetIndex()[2] == 0U)
{
imgRegionIterator.Set(10);
prevPixelVal = 10;
++imgRegionIterator;
}
else {
prevPixelVal = imgRegionIterator.Get();
++imgRegionIterator;
}
}
Image::Pointer resultImage = this->GetOutput();
Point3D origin;
origin[0] = xmin;
origin[1] = ymin;
origin[2] = zmin;
CastToMitkImage(distanceImg, resultImage);
resultImage->GetGeometry()->SetOrigin(origin);
resultImage->SetOrigin(origin);
}
Example 14: if
bool mitk::SetRegionTool::OnMousePressed ( StateMachineAction*, InteractionEvent* interactionEvent )
{
mitk::InteractionPositionEvent* positionEvent = dynamic_cast<mitk::InteractionPositionEvent*>( interactionEvent );
//const PositionEvent* positionEvent = dynamic_cast<const PositionEvent*>(stateEvent->GetEvent());
if (!positionEvent) return false;
m_LastEventSender = positionEvent->GetSender();
m_LastEventSlice = m_LastEventSender->GetSlice();
int timeStep = positionEvent->GetSender()->GetTimeStep();
// 1. Get the working image
Image::Pointer workingSlice = FeedbackContourTool::GetAffectedWorkingSlice( positionEvent );
if ( workingSlice.IsNull() ) return false; // can't do anything without the segmentation
// if click was outside the image, don't continue
const BaseGeometry* sliceGeometry = workingSlice->GetGeometry();
itk::Index<2> projectedPointIn2D;
sliceGeometry->WorldToIndex( positionEvent->GetPositionInWorld(), projectedPointIn2D );
if ( !sliceGeometry->IsIndexInside( projectedPointIn2D ) )
{
MITK_ERROR << "point apparently not inside segmentation slice" << std::endl;
return false; // can't use that as a seed point
}
// Convert to ipMITKSegmentationTYPE (because ipMITKSegmentationGetContour8N relies on that data type)
itk::Image< ipMITKSegmentationTYPE, 2 >::Pointer correctPixelTypeImage;
CastToItkImage( workingSlice, correctPixelTypeImage );
assert (correctPixelTypeImage.IsNotNull() );
// possible bug in CastToItkImage ?
// direction matrix is wrong/broken/not working after CastToItkImage, leading to a failed assertion in
// mitk/Core/DataStructures/mitkSlicedGeometry3D.cpp, 479:
// virtual void mitk::SlicedGeometry3D::SetSpacing(const mitk::Vector3D&): Assertion `aSpacing[0]>0 && aSpacing[1]>0 && aSpacing[2]>0' failed
// solution here: we overwrite it with an unity matrix
itk::Image< ipMITKSegmentationTYPE, 2 >::DirectionType imageDirection;
imageDirection.SetIdentity();
correctPixelTypeImage->SetDirection(imageDirection);
Image::Pointer temporarySlice = Image::New();
// temporarySlice = ImportItkImage( correctPixelTypeImage );
CastToMitkImage( correctPixelTypeImage, temporarySlice );
// check index positions
mitkIpPicDescriptor* originalPicSlice = mitkIpPicNew();
CastToIpPicDescriptor( temporarySlice, originalPicSlice );
int m_SeedPointMemoryOffset = projectedPointIn2D[1] * originalPicSlice->n[0] + projectedPointIn2D[0];
if ( m_SeedPointMemoryOffset >= static_cast<int>( originalPicSlice->n[0] * originalPicSlice->n[1] ) ||
m_SeedPointMemoryOffset < 0 )
{
MITK_ERROR << "Memory offset calculation in mitk::SetRegionTool has a serious flaw! Aborting.." << std::endl;
return false;
}
// 2. Determine the contour that surrounds the selected "piece of the image"
// find a contour seed point
unsigned int oneContourOffset = static_cast<unsigned int>( m_SeedPointMemoryOffset ); // safe because of earlier check if m_SeedPointMemoryOffset < 0
/**
* The logic of finding a starting point for the contour is the following:
*
* - If the initial seed point is 0, we are either inside a hole or outside of every segmentation.
* We move to the right until we hit a 1, which must be part of a contour.
*
* - If the initial seed point is 1, then ...
* we now do the same (running to the right) until we hit a 1
*
* In both cases the found contour point is used to extract a contour and
* then a test is applied to find out if the initial seed point is contained
* in the contour. If this is the case, filling should be applied, otherwise
* nothing is done.
*/
unsigned int size = originalPicSlice->n[0] * originalPicSlice->n[1];
/*
unsigned int rowSize = originalPicSlice->n[0];
*/
ipMITKSegmentationTYPE* data = static_cast<ipMITKSegmentationTYPE*>(originalPicSlice->data);
if ( data[oneContourOffset] == 0 ) // initial seed 0
{
for ( ; oneContourOffset < size; ++oneContourOffset )
{
if ( data[oneContourOffset] > 0 ) break;
}
}
else if ( data[oneContourOffset] == 1 ) // initial seed 1
{
unsigned int lastValidPixel = size-1; // initialization, will be changed later on
bool inSeg = true; // inside segmentation?
for ( ; oneContourOffset < size; ++oneContourOffset )
{
if ( ( data[oneContourOffset] == 0 ) && inSeg ) // pixel 0 and inside-flag set: this happens at the first pixel outside a filled region
{
inSeg = false;
lastValidPixel = oneContourOffset - 1; // store the last pixel position inside a filled region
break;
}
//......... part of the code is omitted here .........
Example 15: ConvertStreamToNrrdFormat
void ToFNrrdImageWriter::ConvertStreamToNrrdFormat( std::string fileName )
{
int CaptureWidth = 0;
int CaptureHeight = 0;
int PixelNumber = 0;
int ImageSizeInBytes = 0;
if (fileName==this->m_RGBImageFileName)
{
CaptureWidth = this->m_RGBCaptureWidth;
CaptureHeight = this->m_RGBCaptureHeight;
PixelNumber = this->m_RGBPixelNumber;
ImageSizeInBytes = this->m_RGBImageSizeInBytes;
} else
{
CaptureWidth = this->m_ToFCaptureWidth;
CaptureHeight = this->m_ToFCaptureHeight;
PixelNumber = this->m_ToFPixelNumber;
ImageSizeInBytes = this->m_ToFImageSizeInBytes;
}
Image::Pointer imageTemplate = Image::New();
int dimension ;
unsigned int* dimensions;
if(m_ToFImageType == ToFImageType2DPlusT)
{
dimension = 4;
dimensions = new unsigned int[dimension];
dimensions[0] = CaptureWidth;
dimensions[1] = CaptureHeight;
dimensions[2] = 1;
dimensions[3] = this->m_NumOfFrames;
}
else if( m_ToFImageType == ToFImageType3D)
{
dimension = 3;
dimensions = new unsigned int[dimension];
dimensions[0] = CaptureWidth;
dimensions[1] = CaptureHeight;
dimensions[2] = this->m_NumOfFrames;
}
else
{
throw std::logic_error("No image type set, please choose between 2D+t and 3D!");
}
float* floatData;
unsigned char* rgbData;
if (fileName==this->m_RGBImageFileName)
{
rgbData = new unsigned char[PixelNumber*3];
for(int i=0; i<PixelNumber*3; i++)
{
rgbData[i] = i + 0.0;
}
mitk::PixelType RGBType = MakePixelType<unsigned char, itk::RGBPixel<unsigned char>, 3>();
imageTemplate->Initialize( RGBType,dimension, dimensions, 1);
imageTemplate->SetSlice(rgbData, 0, 0, 0);
}
else
{
floatData = new float[PixelNumber];
for(int i=0; i<PixelNumber; i++)
{
floatData[i] = i + 0.0;
}
mitk::PixelType FloatType = MakeScalarPixelType<float>();
imageTemplate->Initialize( FloatType,dimension, dimensions, 1);
imageTemplate->SetSlice(floatData, 0, 0, 0);
}
itk::NrrdImageIO::Pointer nrrdWriter = itk::NrrdImageIO::New();
nrrdWriter->SetNumberOfDimensions(dimension);
nrrdWriter->SetPixelType( imageTemplate->GetPixelType().GetPixelType());
nrrdWriter->SetComponentType( (itk::ImageIOBase::IOComponentType) imageTemplate->GetPixelType().GetComponentType());
if(imageTemplate->GetPixelType().GetNumberOfComponents() > 1)
{
nrrdWriter->SetNumberOfComponents(imageTemplate->GetPixelType().GetNumberOfComponents());
}
itk::ImageIORegion ioRegion( dimension );
mitk::Vector3D spacing = imageTemplate->GetGeometry()->GetSpacing();
mitk::Point3D origin = imageTemplate->GetGeometry()->GetOrigin();
for(unsigned int i = 0; i < dimension; i++)
{
nrrdWriter->SetDimensions(i,dimensions[i]);
nrrdWriter->SetSpacing(i,spacing[i]);
nrrdWriter->SetOrigin(i,origin[i]);
mitk::Vector3D direction;
direction.Set_vnl_vector(imageTemplate->GetGeometry()->GetIndexToWorldTransform()->GetMatrix().GetVnlMatrix().get_column(i));
vnl_vector< double > axisDirection(dimension);
for(unsigned int j = 0; j < dimension; j++)
{
axisDirection[j] = direction[j]/spacing[i];
}
nrrdWriter->SetDirection( i, axisDirection );
ioRegion.SetSize(i, imageTemplate->GetLargestPossibleRegion().GetSize(i) );
ioRegion.SetIndex(i, imageTemplate->GetLargestPossibleRegion().GetIndex(i) );
}
//......... part of the code is omitted here .........