本文整理汇总了Java中mpicbg.spim.data.registration.ViewRegistrations.getViewRegistration方法的典型用法代码示例。如果您正苦于以下问题:Java ViewRegistrations.getViewRegistration方法的具体用法?Java ViewRegistrations.getViewRegistration怎么用?Java ViewRegistrations.getViewRegistration使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mpicbg.spim.data.registration.ViewRegistrations
的用法示例。
在下文中一共展示了ViewRegistrations.getViewRegistration方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: setModelToCalibration
import mpicbg.spim.data.registration.ViewRegistrations; //导入方法依赖的package包/类
/**
 * Resets the registration of the given view to a pure calibration transform:
 * the identity model pre-concatenated with an anisotropic scaling of
 * (voxel size / minResolution) in x, y and z.
 */
public static void setModelToCalibration( final SpimData spimData, final ViewId viewId, final double minResolution )
{
	// start from a clean identity registration
	setModelToIdentity( spimData, viewId );

	final ViewRegistration registration = spimData.getViewRegistrations().getViewRegistration( viewId );
	final ViewDescription desc = spimData.getSequenceDescription().getViewDescription(
			viewId.getTimePointId(), viewId.getViewSetupId() );

	// voxel size may have to be loaded from the image data if it is not stored in the setup
	final VoxelDimensions voxelSize = ViewSetupUtils.getVoxelSizeOrLoad(
			desc.getViewSetup(), desc.getTimePoint(), spimData.getSequenceDescription().getImgLoader() );

	// scale each axis relative to the finest resolution
	final AffineTransform3D scale = new AffineTransform3D();
	scale.set(
			voxelSize.dimension( 0 ) / minResolution, 0.0, 0.0, 0.0,
			0.0, voxelSize.dimension( 1 ) / minResolution, 0.0, 0.0,
			0.0, 0.0, voxelSize.dimension( 2 ) / minResolution, 0.0 );

	registration.preconcatenateTransform( new ViewTransformAffine( "calibration", scale ) );
}
示例2: numReorientated
import mpicbg.spim.data.registration.ViewRegistrations; //导入方法依赖的package包/类
/**
 * Counts how many of the views to process carry a reorientation transform as
 * their most recent (first) view transform.
 *
 * @return pair of (number of reorientated views, total number of present views)
 */
protected Pair< Integer, Integer > numReorientated()
{
	final ViewRegistrations registrations = spimData.getViewRegistrations();

	int reorientated = 0;
	int present = 0;

	for ( final ViewId viewId : viewIdsToProcess )
	{
		final ViewDescription vd = spimData.getSequenceDescription().getViewDescription( viewId );

		// missing views do not count towards either total
		if ( !vd.isPresent() )
			continue;

		++present;

		// index 0 holds the most recently added transform
		final ViewTransform first = registrations.getViewRegistration( viewId ).getTransformList().get( 0 );
		if ( first.hasName() && first.getName().startsWith( reorientationDescription ) )
			++reorientated;
	}

	return new ValuePair< Integer, Integer >( reorientated, present );
}
示例3: preConcatenateTransform
import mpicbg.spim.data.registration.ViewRegistrations; //导入方法依赖的package包/类
/**
 * Pre-concatenates the given affine model, stored under the given name, to the
 * registration of the specified view.
 */
public static void preConcatenateTransform( final SpimData spimData, final ViewId viewId, final AffineTransform3D model, final String name )
{
	// look up the registration for this view and push the named transform onto it
	final ViewRegistration registration =
			spimData.getViewRegistrations().getViewRegistration( viewId );
	registration.preconcatenateTransform( new ViewTransformAffine( name, model ) );
}
示例4: assembleRegistrationNames
import mpicbg.spim.data.registration.ViewRegistrations; //导入方法依赖的package包/类
/**
 * Collects, over all present views, the name of the most recent (first) view
 * transform and how often each name occurs.
 *
 * @param data the SpimData containing registrations and view descriptions
 * @param viewIds the views to inspect; missing views are skipped
 * @return map from transform name to occurrence count (unnamed transforms are ignored)
 */
public static HashMap< String, Integer > assembleRegistrationNames( final SpimData data, final List< ViewId > viewIds )
{
	final ViewRegistrations vr = data.getViewRegistrations();
	final SequenceDescription sd = data.getSequenceDescription();

	final HashMap< String, Integer > names = new HashMap< String, Integer >();

	for ( final ViewId viewId : viewIds )
	{
		final ViewDescription vd = sd.getViewDescription( viewId );

		if ( !vd.isPresent() )
			continue;

		// index 0 of the transform list is the most recently added transform
		final String rName = vr.getViewRegistration( vd ).getTransformList().get( 0 ).getName();

		if ( rName != null )
		{
			// count occurrences of this transform name
			final Integer count = names.get( rName );
			names.put( rName, count == null ? 1 : count + 1 );
		}
	}

	return names;
}
示例5: computeStitching
import mpicbg.spim.data.registration.ViewRegistrations; //导入方法依赖的package包/类
/**
 * Computes a pairwise translational stitching shift between two groups of views.
 *
 * @param viewIdsA first group of views
 * @param viewIdsB second group of views
 * @param vrs current view registrations (initial transforms and global conversion)
 * @param params pairwise stitching parameters
 * @param sd sequence description used to aggregate/load the images
 * @param gva aggregator combining each group into a single image
 * @param downsampleFactors downsampling applied before the shift computation
 * @param downsampleFactors per-dimension downsampling applied before registration
 * @param service executor used for the pairwise computation
 * @return pair of ((shift transform in global coordinates, cross-correlation), overlap
 *         bounding box), or {@code null} if the groups do not overlap, a view is
 *         missing, or no shift could be determined
 */
public static < T extends RealType< T > > Pair<Pair< AffineGet, Double >, RealInterval> computeStitching(
		final Group<? extends ViewId> viewIdsA,
		final Group<? extends ViewId> viewIdsB,
		final ViewRegistrations vrs,
		final PairwiseStitchingParameters params,
		final AbstractSequenceDescription< ?,? extends BasicViewDescription<?>, ? > sd,
		final GroupedViewAggregator gva,
		final long[] downsampleFactors,
		final ExecutorService service )
{
	// the transformation that maps the downsampled image coordinates back to the original input(!) image space
	final AffineTransform3D dsCorrectionT1 = new AffineTransform3D();
	final AffineTransform3D dsCorrectionT2 = new AffineTransform3D();

	// get Overlap Bounding Box
	final List<List<ViewId>> views = new ArrayList<>();
	views.add( new ArrayList<>( viewIdsA.getViews() ) );
	views.add( new ArrayList<>( viewIdsB.getViews() ) );
	final BoundingBoxMaximalGroupOverlap< ViewId > bbDet = new BoundingBoxMaximalGroupOverlap<ViewId>( views, sd, vrs );
	final BoundingBox bbOverlap = bbDet.estimate( "Max Overlap" );

	// this should be caught outside of this method already, but check nonetheless
	if ( bbOverlap == null )
		return null;

	// get one image per group
	final RandomAccessibleInterval<T> img1 = gva.aggregate( viewIdsA, sd, downsampleFactors, dsCorrectionT1 );
	final RandomAccessibleInterval<T> img2 = gva.aggregate( viewIdsB, sd, downsampleFactors, dsCorrectionT2 );
	if ( img1 == null || img2 == null )
	{
		IOFunctions.println( "WARNING: Tried to open missing View when computing Stitching for " + viewIdsA + " and " +
				viewIdsB + ". No link between those could be determined");
		return null;
	}

	// get translations
	// TODO: is the 2d check here meaningful?
	// everything will probably be 3d at this point, since ImgLoaders return 3d images
	final boolean is2d = img1.numDimensions() == 2;
	final Pair< AffineGet, TranslationGet > t1 = TransformTools.getInitialTransforms( vrs.getViewRegistration(viewIdsA.iterator().next()), is2d, dsCorrectionT1 );
	final Pair< AffineGet, TranslationGet > t2 = TransformTools.getInitialTransforms( vrs.getViewRegistration(viewIdsB.iterator().next()), is2d, dsCorrectionT2 );

	final Pair< Translation, Double > result = PairwiseStitching.getShift( img1, img2, t1.getB(), t2.getB(), params, service );

	if ( result == null )
		return null;

	// scale the shift back up from downsampled to original pixel space
	for ( int i = 0; i < result.getA().numDimensions(); ++i )
		result.getA().set( result.getA().get(i, result.getA().numDimensions()) * downsampleFactors[i], i );

	// TODO (?): Different translational part of downsample Transformations should be considered via TransformTools.getInitialTransforms
	// we probably do not have to correct for them ?

	// NB: as we will deal in global coordinates, not pixel coordinates in global optimization,
	// calculate global R' = VT^-1 * R * VT from pixel transformation R
	final ViewRegistration vrOld = vrs.getViewRegistration(viewIdsB.iterator().next());
	final AffineTransform3D resTransform = new AffineTransform3D();
	resTransform.set( result.getA().getRowPackedCopy() );
	resTransform.concatenate( vrOld.getModel().inverse() );
	resTransform.preConcatenate( vrOld.getModel() );

	// FIX: log via IOFunctions.println for consistency with computeStitchingLucasKanade
	// (and with the warning above); the old System.out.print for cross-corr also lacked
	// a trailing newline, so the next output line was glued onto it.
	IOFunctions.println("shift (pixel coordinates): " + Util.printCoordinates(result.getA().getTranslationCopy()));
	IOFunctions.println("shift (global coordinates): " + Util.printCoordinates(resTransform.getRowPackedCopy()));
	IOFunctions.println("cross-corr: " + result.getB());

	return new ValuePair<>( new ValuePair<>( resTransform, result.getB() ), bbOverlap );
}
示例6: computeStitchingLucasKanade
import mpicbg.spim.data.registration.ViewRegistrations; //导入方法依赖的package包/类
/**
 * Computes a pairwise stitching transform between two groups of views using
 * Lucas-Kanade registration (yields a full affine result rather than only a translation).
 *
 * @param viewIdsA first group of views
 * @param viewIdsB second group of views
 * @param vrs current view registrations (initial transforms and global conversion)
 * @param params Lucas-Kanade registration parameters
 * @param sd sequence description used to aggregate/load the images
 * @param gva aggregator combining each group into a single image
 * @param downsampleFactors per-dimension downsampling applied before registration
 * @param service executor used for the pairwise computation
 * @return pair of ((resulting transform in global coordinates, quality score), overlap
 *         bounding box), or null if the groups do not overlap, a view is missing,
 *         or the registration failed
 */
public static < T extends RealType< T > > Pair<Pair< AffineGet, Double >, RealInterval> computeStitchingLucasKanade(
final Group<? extends ViewId> viewIdsA,
final Group<? extends ViewId> viewIdsB,
final ViewRegistrations vrs,
final LucasKanadeParameters params,
final AbstractSequenceDescription< ?,? extends BasicViewDescription<?>, ? > sd,
final GroupedViewAggregator gva,
final long[] downsampleFactors,
final ExecutorService service )
{
// the transformation that maps the downsampled image coordinates back to the original input(!) image space
final AffineTransform3D dsCorrectionT1 = new AffineTransform3D();
final AffineTransform3D dsCorrectionT2 = new AffineTransform3D();
// get Overlap Bounding Box
final List<List<ViewId>> views = new ArrayList<>();
views.add( new ArrayList<>(viewIdsA.getViews()) );
views.add( new ArrayList<>(viewIdsB.getViews()) );
BoundingBoxMaximalGroupOverlap< ViewId > bbDet = new BoundingBoxMaximalGroupOverlap<ViewId>( views, sd, vrs );
BoundingBox bbOverlap = bbDet.estimate( "Max Overlap" );
// this should be caught outside of this method already, but check nonetheless
if (bbOverlap == null)
return null;
// get one image per group
final RandomAccessibleInterval<T> img1 = gva.aggregate( viewIdsA, sd, downsampleFactors, dsCorrectionT1 );
final RandomAccessibleInterval<T> img2 = gva.aggregate( viewIdsB, sd, downsampleFactors, dsCorrectionT2 );
if (img1 == null || img2 == null)
{
IOFunctions.println( "WARNING: Tried to open missing View when computing Stitching for " + viewIdsA + " and " +
viewIdsB + ". No link between those could be determined");
return null;
}
// get translations
// TODO: is the 2d check here meaningful?
boolean is2d = img1.numDimensions() == 2;
// initial transforms of a representative view of each group (downsample-corrected)
Pair< AffineGet, TranslationGet > t1 = TransformTools.getInitialTransforms( vrs.getViewRegistration(viewIdsA.iterator().next()), is2d, dsCorrectionT1 );
Pair< AffineGet, TranslationGet > t2 = TransformTools.getInitialTransforms( vrs.getViewRegistration(viewIdsB.iterator().next()), is2d, dsCorrectionT2 );
final Pair< AffineTransform, Double > result = PairwiseStitching.getShiftLucasKanade( img1, img2, t1.getB(), t2.getB(), params, service );
if (result == null)
return null;
// scale the translational part back up from downsampled to original pixel space;
// the last matrix column (index numDimensions) holds the translation
// TODO: is scaling just the translational part okay here?
for (int i = 0; i< result.getA().numDimensions(); ++i)
result.getA().set( result.getA().get(i, result.getA().numDimensions()) * downsampleFactors[i], i, result.getA().numDimensions() );
// TODO (?): Different translational part of downsample Transformations should be considered via TransformTools.getInitialTransforms
// we probalbly do not have to correct for them ?
// NB: as we will deal in global coordinates, not pixel coordinates in global optimization,
// calculate global R' = VT^-1 * R * VT from pixel transformation R
// (view B's current model is used for the conversion)
ViewRegistration vrOld = vrs.getViewRegistration(viewIdsB.iterator().next());
AffineTransform3D resTransform = new AffineTransform3D();
resTransform.set( result.getA().getRowPackedCopy() );
resTransform.concatenate( vrOld.getModel().inverse() );
resTransform.preConcatenate( vrOld.getModel() );
IOFunctions.println("resulting transformation (pixel coordinates): " + Util.printCoordinates(result.getA().getRowPackedCopy()));
IOFunctions.println("resulting transformation (global coordinates): " + Util.printCoordinates(resTransform.getRowPackedCopy()));
return new ValuePair<>( new ValuePair<>( resTransform, result.getB() ), bbOverlap );
}
示例7: openVirtuallyFused
import mpicbg.spim.data.registration.ViewRegistrations; //导入方法依赖的package包/类
/**
 * Creates one virtually (lazily) fused image per view group: each group's views are
 * transformed into the scaled bounding box and blended via per-pixel weights.
 *
 * @param sd sequence description providing the image loader and view descriptions
 * @param vrs view registrations; models are updated and used to place each view
 * @param views collections of view groups; each inner collection is fused into one image
 * @param boundingBox target region in global coordinates
 * @param downsamplingFactors per-dimension downsampling applied to the output
 * @return one lazily-fused FloatType image per view group
 */
public static <S extends AbstractSequenceDescription< ?,? extends BasicViewDescription<? extends BasicViewSetup>, ? >>
List<RandomAccessibleInterval< FloatType >> openVirtuallyFused(
S sd,
ViewRegistrations vrs,
Collection<? extends Collection<ViewId>> views,
Interval boundingBox,
double[] downsamplingFactors)
{
final BasicImgLoader imgLoader = sd.getImgLoader();
final List<RandomAccessibleInterval< FloatType >> openImgs = new ArrayList<>();
// shrink the bounding box to the downsampled output resolution
final Interval bbSc = TransformVirtual.scaleBoundingBox( new FinalInterval( boundingBox ), inverse( downsamplingFactors ));
final long[] dim = new long[ bbSc.numDimensions() ];
bbSc.dimensions( dim );
for (Collection<ViewId> viewGroup : views)
{
final ArrayList< RandomAccessibleInterval< FloatType > > images = new ArrayList<>();
final ArrayList< RandomAccessibleInterval< FloatType > > weights = new ArrayList<>();
for ( final ViewId viewId : viewGroup )
{
// make sure the concatenated model reflects the current transform list
final ViewRegistration vr = vrs.getViewRegistration( viewId );
vr.updateModel();
AffineTransform3D model = vr.getModel();
// default blending range/border, same in all 3 dimensions
final float[] blending = Util.getArrayFromValue( FusionTools.defaultBlendingRange, 3 );
final float[] border = Util.getArrayFromValue( FusionTools.defaultBlendingBorder, 3 );
// copy before mutating so the registration's own model stays untouched
model = model.copy();
TransformVirtual.scaleTransform( model, inverse(downsamplingFactors) );
// openDownsampled may pick a pre-downsampled level and adjusts 'model' accordingly
final RandomAccessibleInterval inputImg = DownsampleTools.openDownsampled( imgLoader, viewId, model );
System.out.println( model.inverse() );
FusionTools.adjustBlending( sd.getViewDescriptions().get( viewId ), blending, border, model );
// lazily transformed image and matching blending-weight image
images.add( TransformView.transformView( inputImg, model, bbSc, 0, 1 ) );
weights.add( TransformWeight.transformBlending( inputImg, border, blending, model, bbSc ) );
}
// weighted average of all views of the group, evaluated on demand
openImgs.add( new FusedRandomAccessibleInterval( new FinalInterval( dim ), images, weights ) );
}
return openImgs;
}
示例8: setModelToIdentity
import mpicbg.spim.data.registration.ViewRegistrations; //导入方法依赖的package包/类
/**
 * Replaces the registration of the given view by the identity transform.
 */
public static void setModelToIdentity( final SpimData spimData, final ViewId viewId )
{
	spimData.getViewRegistrations().getViewRegistration( viewId ).identity();
}
示例9: getInterestPoints
import mpicbg.spim.data.registration.ViewRegistrations; //导入方法依赖的package包/类
/**
 * Creates lists of input points for the registration, based on the current
 * transformation of the views: each interest point's local coordinates are
 * mapped through the view's up-to-date registration model.
 *
 * Note: this always duplicates the location array from the input
 * {@code List< InterestPoint >} !!!
 *
 * @param timepoint the timepoint whose present views are collected
 * @return map from view id to its transformed interest points (with the matching
 *         channel process); views without a matching channel or without loadable
 *         interest points are skipped
 */
protected HashMap< ViewId, MatchPointList > getInterestPoints( final TimePoint timepoint )
{
final HashMap< ViewId, MatchPointList > interestPoints = new HashMap< ViewId, MatchPointList >();
final ViewRegistrations registrations = spimData.getViewRegistrations();
final ViewInterestPoints interestpoints = spimData.getViewInterestPoints();
for ( final ViewDescription vd : SpimData2.getAllViewIdsForTimePointSorted( spimData, viewIdsToProcess, timepoint) )
{
if ( !vd.isPresent() )
continue;
final ChannelProcess c = getChannelProcessForChannel( channelsToProcess, vd.getViewSetup().getChannel() );
// no registration for this viewdescription
if ( c == null )
continue;
// angle / illumination only needed for log messages below
final Angle a = vd.getViewSetup().getAngle();
final Illumination i = vd.getViewSetup().getIllumination();
// assemble a new list
final ArrayList< InterestPoint > list = new ArrayList< InterestPoint >();
// check the existing lists of points
final ViewInterestPointLists lists = interestpoints.getViewInterestPointLists( vd );
if ( !lists.contains( c.getLabel() ) )
{
IOFunctions.println( "Interest points for label '" + c.getLabel() + "' not found for timepoint: " + timepoint.getId() + " angle: " +
a.getId() + " channel: " + c.getChannel().getId() + " illum: " + i.getId() );
continue;
}
// points may not be in memory yet; try loading them lazily from disk
if ( lists.getInterestPointList( c.getLabel() ).getInterestPoints() == null )
{
if ( !lists.getInterestPointList( c.getLabel() ).loadInterestPoints() )
{
IOFunctions.println( "Interest points for label '" + c.getLabel() + "' could not be loaded for timepoint: " + timepoint.getId() + " angle: " +
a.getId() + " channel: " + c.getChannel().getId() + " illum: " + i.getId() );
continue;
}
}
final List< InterestPoint > ptList = lists.getInterestPointList( c.getLabel() ).getInterestPoints();
// apply the view's current model to every point (duplicates the location array)
final ViewRegistration r = registrations.getViewRegistration( vd );
r.updateModel();
final AffineTransform3D m = r.getModel();
for ( final InterestPoint p : ptList )
{
final double[] l = new double[ 3 ];
m.apply( p.getL(), l );
list.add( new InterestPoint( p.getId(), l ) );
}
interestPoints.put( vd, new MatchPointList( list, c ) );
}
return interestPoints;
}