This article collects typical usage examples of the Java class boofcv.core.image.border.BorderType. If you are unsure what BorderType does or how it is used, the selected code examples below should help.
The BorderType class belongs to the boofcv.core.image.border package. Fifteen code examples are shown, ordered roughly by popularity.
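Before the examples, here is a minimal, self-contained sketch of what BorderType controls: how pixel reads outside the image bounds are resolved. The class name BorderTypeBasics, the image dimensions, and the choice of ImageFloat32 are illustrative assumptions, and the package layout is assumed to match the BoofCV release used by the examples below; the factory calls themselves mirror those examples.

import boofcv.core.image.FactoryGImageSingleBand;
import boofcv.core.image.GImageSingleBand;
import boofcv.core.image.border.BorderType;
import boofcv.core.image.border.FactoryImageBorder;
import boofcv.core.image.border.ImageBorder;
import boofcv.struct.image.ImageFloat32;

public class BorderTypeBasics {
	public static void main( String[] args ) {
		ImageFloat32 image = new ImageFloat32(40, 30);

		// EXTENDED repeats the outermost pixel values beyond the image edge
		ImageBorder<ImageFloat32> extended =
				FactoryImageBorder.general(image, BorderType.EXTENDED);

		// VALUE fills everything outside the image with a constant, here 0
		ImageBorder<ImageFloat32> zeroFill =
				FactoryImageBorder.value(ImageFloat32.class, 0);

		// Wrap the bordered image so it can be read like a normal image,
		// even at coordinates outside the original bounds
		GImageSingleBand safe = FactoryGImageSingleBand.wrap(extended);
		float outside = safe.get(-1, -1).floatValue();
		System.out.println("Value read outside the image: " + outside);
	}
}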
Example 1: generalized
import boofcv.core.image.border.BorderType; // import the required package/class
public static <T extends ImageSingleBand, D extends ImageSingleBand>
void generalized( T input )
{
	Class<T> inputType = (Class<T>)input.getClass();
	Class<D> derivType = GImageDerivativeOps.getDerivativeType(inputType);

	T blurred = GeneralizedImageOps.createSingleBand(inputType, input.width, input.height);
	D derivX = GeneralizedImageOps.createSingleBand(derivType, input.width, input.height);
	D derivY = GeneralizedImageOps.createSingleBand(derivType, input.width, input.height);

	// Gaussian blur: Convolve a Gaussian kernel
	GBlurImageOps.gaussian(input, blurred, -1, blurRadius, null);

	// Calculate image's derivative
	GImageDerivativeOps.sobel(blurred, derivX, derivY, BorderType.EXTENDED);

	// display the results
	BufferedImage outputImage = VisualizeImageData.colorizeSign(derivX, null, -1);
	ShowImages.showWindow(outputImage, "Generalized " + inputType.getSimpleName());
}
Example 2: nogenerics
import boofcv.core.image.border.BorderType; // import the required package/class
public static void nogenerics( ImageSingleBand input )
{
	Class inputType = input.getClass();
	Class derivType = GImageDerivativeOps.getDerivativeType(inputType);

	ImageSingleBand blurred = GeneralizedImageOps.createSingleBand(inputType, input.width, input.height);
	ImageSingleBand derivX = GeneralizedImageOps.createSingleBand(derivType, input.width, input.height);
	ImageSingleBand derivY = GeneralizedImageOps.createSingleBand(derivType, input.width, input.height);

	// Gaussian blur: Convolve a Gaussian kernel
	GBlurImageOps.gaussian(input, blurred, -1, blurRadius, null);

	// Calculate image's derivative
	GImageDerivativeOps.sobel(blurred, derivX, derivY, BorderType.EXTENDED);

	// display the results
	BufferedImage outputImage = VisualizeImageData.colorizeSign(derivX, null, -1);
	ShowImages.showWindow(outputImage, "Generalized " + inputType.getSimpleName());
}
Example 3: removeRadialImage
import boofcv.core.image.border.BorderType; // import the required package/class
/**
 * Creates an {@link ImageDistort} which removes radial distortion. How pixels outside the image are handled
 * is specified by the BorderType. If BorderType.VALUE is used, pixels outside the image are filled in with a
 * value of 0. BorderType.VALUE is recommended for viewing purposes and BorderType.EXTENDED for computer
 * vision applications: VALUE creates harsh edges which can cause false positives when detecting features,
 * which EXTENDED minimizes.
 *
 * @param param Intrinsic camera parameters
 * @param imageType Type of single band image being processed
 * @param borderType Specifies how the image border is handled.
 * @return Image distort that removes radial distortion
 */
public static <T extends ImageSingleBand> ImageDistort<T>
removeRadialImage(IntrinsicParameters param, BorderType borderType, Class<T> imageType)
{
	InterpolatePixel<T> interp = FactoryInterpolation.bilinearPixel(imageType);

	ImageBorder<T> border;
	if( borderType == BorderType.VALUE )
		border = FactoryImageBorder.value(imageType, 0);
	else
		border = FactoryImageBorder.general(imageType, borderType);

	// only compute the transform once
	ImageDistort<T> ret = FactoryDistort.distortCached(interp, border, imageType);

	PointTransform_F32 transform = transformPixelToRadial_F32(param);
	ret.setModel(new PointToPixelTransform_F32(transform));

	return ret;
}
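A hedged usage sketch of the method above: the helper name undistort, the ImageFloat32 image type, and the assumption that the IntrinsicParameters were calibrated elsewhere are illustrative, not part of the original example.

public static ImageFloat32 undistort( ImageFloat32 distorted, IntrinsicParameters intrinsic ) {
	// EXTENDED avoids the harsh zero-valued border that VALUE would introduce,
	// which matters if features are detected in the corrected image later on
	ImageDistort<ImageFloat32> undistorter =
			removeRadialImage(intrinsic, BorderType.EXTENDED, ImageFloat32.class);

	ImageFloat32 corrected = new ImageFloat32(distorted.width, distorted.height);
	undistorter.apply(distorted, corrected);
	return corrected;
}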
Example 4: PyramidDiscreteSampleBlur
import boofcv.core.image.border.BorderType; // import the required package/class
/**
 * @param kernel A blur kernel
 * @param sigma The effective amount of Gaussian blur the kernel applies
 * @param imageType Type of image processed
 * @param saveOriginalReference If a reference to the full resolution image should be saved instead of copied.
 *                              Set to false if you don't know what you are doing.
 * @param scaleFactors Scale factor for each layer in the pyramid relative to the input layer
 */
public PyramidDiscreteSampleBlur(Kernel1D kernel, double sigma, Class<T> imageType,
								 boolean saveOriginalReference, int... scaleFactors)
{
	super(imageType, saveOriginalReference, scaleFactors);

	horizontal = FactoryConvolveDown.convolve(kernel, imageType, imageType,
			BorderType.NORMALIZED, true, 1);
	vertical = FactoryConvolveDown.convolve(kernel, imageType, imageType,
			BorderType.NORMALIZED, false, 1);

	sigmas = new double[ scaleFactors.length ];
	sigmas[0] = 0;
	for( int i = 1; i < sigmas.length; i++ ) {
		// blur in previous layer
		double prev = sigmas[i-1];
		// the effective amount of blur applied to previous layer while being down sampled
		double applied = sigma*scaleFactors[i-1];
		// The amount of blur which has been applied to this layer
		sigmas[i] = Math.sqrt(prev*prev + applied*applied);
	}
}
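Since independent Gaussian blurs combine in quadrature, the loop above accumulates sigma_total = sqrt(sigma_prev^2 + sigma_applied^2): for example, with sigma = 1 and scaleFactors = {1, 2, 4}, it produces sigmas = {0, 1, sqrt(5) ≈ 2.24}.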
Example 5: createDefaultShrinkTransform
import boofcv.core.image.border.BorderType; // import the required package/class
/**
 * Default wavelet transform used for denoising images.
 */
private static WaveletTransform createDefaultShrinkTransform(ImageTypeInfo imageType, int numLevels,
															  double minPixelValue, double maxPixelValue) {
	WaveletTransform descTran;

	if( !imageType.isInteger()) {
		WaveletDescription<WlCoef_F32> waveletDesc_F32 = FactoryWaveletDaub.daubJ_F32(4);
		descTran = FactoryWaveletTransform.create_F32(waveletDesc_F32, numLevels,
				(float)minPixelValue, (float)maxPixelValue);
	} else {
		WaveletDescription<WlCoef_I32> waveletDesc_I32 = FactoryWaveletDaub.biorthogonal_I32(5, BorderType.REFLECT);
		descTran = FactoryWaveletTransform.create_I(waveletDesc_I32, numLevels,
				(int)minPixelValue, (int)maxPixelValue, imageType.getImageClass());
	}
	return descTran;
}
Example 6: createDesc_F32
import boofcv.core.image.border.BorderType; // import the required package/class
private WaveletDescription<WlCoef_F32> createDesc_F32(int offset, int length, BorderType type) {
	WlCoef_F32 forward = createRandomCoef_F32(offset, length);

	WlBorderCoef<WlCoef_F32> inverse;
	BorderIndex1D border;

	if( type == BorderType.WRAP ) {
		inverse = new WlBorderCoefStandard<WlCoef_F32>(forward);
		border = new BorderIndex1D_Wrap();
	} else {
		inverse = createFixedCoef_F32(forward);
		border = new BorderIndex1D_Reflect();
	}

	return new WaveletDescription<WlCoef_F32>(border, forward, inverse);
}
Example 7: createDesc_I32
import boofcv.core.image.border.BorderType; // import the required package/class
private WaveletDescription<WlCoef_I32> createDesc_I32(int offset, int length, BorderType type) {
	WlCoef_I32 forward = createRandomCoef_I32(offset, length);

	BorderIndex1D border;
	WlBorderCoef<WlCoef_I32> inverse;

	if( type == BorderType.WRAP ) {
		inverse = new WlBorderCoefStandard<WlCoef_I32>(forward);
		border = new BorderIndex1D_Wrap();
	} else {
		inverse = createFixedCoef_I32(forward);
		border = new BorderIndex1D_Reflect();
	}

	return new WaveletDescription<WlCoef_I32>(border, forward, inverse);
}
Example 8: compute
import boofcv.core.image.border.BorderType; // import the required package/class
@Override
protected float compute(T _img, float x, float y) {
	// Wrap the image with an EXTENDED border so the reads at gX+1 and gY+1
	// below stay safe even when (x,y) lies on the last row or column
	ImageBorder<?> imgB = FactoryImageBorder.general(_img, BorderType.EXTENDED);
	GImageSingleBand img = FactoryGImageSingleBand.wrap(imgB);

	// the four pixels surrounding (x,y)
	int gX = (int) x;
	int gY = (int) y;
	float v0 = img.get(gX, gY).floatValue();
	float v1 = img.get(gX + 1, gY).floatValue();
	float v2 = img.get(gX, gY + 1).floatValue();
	float v3 = img.get(gX + 1, gY + 1).floatValue();

	// keep only the fractional part of the coordinates
	x %= 1f;
	y %= 1f;

	// standard bilinear interpolation weights
	float a = 1f - x;
	float b = 1f - y;
	return a * b * v0 + x * b * v1 + a * y * v2 + x * y * v3;
}
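The return expression is the standard bilinear interpolation formula f(x,y) = (1-x)(1-y)·v0 + x(1-y)·v1 + (1-x)y·v2 + xy·v3, written with a = 1-x and b = 1-y.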
Example 9: createInputParam
import boofcv.core.image.border.BorderType; // import the required package/class
@Override
protected Object[][] createInputParam(Method candidate, Method validation) {
	Class<?>[] paramTypes = candidate.getParameterTypes();

	Object kernel = createKernel(paramTypes[0]);

	ImageSingleBand src = ConvolutionTestHelper.createImage(validation.getParameterTypes()[1], width, height);
	GImageMiscOps.fillUniform(src, rand, 0, 5);
	ImageSingleBand dst = ConvolutionTestHelper.createImage(validation.getParameterTypes()[2], width, height);

	Object[][] ret = new Object[1][paramTypes.length];
	ret[0][0] = kernel;
	ret[0][1] = src;
	ret[0][2] = dst;
	ret[0][3] = FactoryImageBorder.general(src, BorderType.EXTENDED);

	return ret;
}
Example 10: EquirectangularToPinhole
import boofcv.core.image.border.BorderType; // import the required package/class
public EquirectangularToPinhole() {
	ImageType<Planar<GrayU8>> imageType = ImageType.pl(3, GrayU8.class);

	InterpolatePixel<Planar<GrayU8>> interp = FactoryInterpolation.
			createPixel(0, 255, InterpolationType.BILINEAR, BorderType.EXTENDED, imageType);

	distorter = FactoryDistort.distort(false, interp, imageType);
}
Example 11: removePerspective
import boofcv.core.image.border.BorderType; // import the required package/class
/**
 * Removes perspective distortion. The four points must lie inside 'this' image and be specified in
 * clockwise order.
 *
 * @param outWidth Width of output image
 * @param outHeight Height of output image
 * @return Image with perspective distortion removed
 */
public SimpleGray removePerspective( int outWidth, int outHeight,
									 double x0, double y0,
									 double x1, double y1,
									 double x2, double y2,
									 double x3, double y3 )
{
	ImageGray output = (ImageGray)image.createNew(outWidth, outHeight);

	// Homography estimation algorithm. Requires a minimum of 4 points
	Estimate1ofEpipolar computeHomography = FactoryMultiView.computeHomography(true);

	// Specify the pixel coordinates from destination to target
	ArrayList<AssociatedPair> associatedPairs = new ArrayList<AssociatedPair>();
	associatedPairs.add(new AssociatedPair(new Point2D_F64(0, 0), new Point2D_F64(x0, y0)));
	associatedPairs.add(new AssociatedPair(new Point2D_F64(outWidth - 1, 0), new Point2D_F64(x1, y1)));
	associatedPairs.add(new AssociatedPair(new Point2D_F64(outWidth - 1, outHeight - 1), new Point2D_F64(x2, y2)));
	associatedPairs.add(new AssociatedPair(new Point2D_F64(0, outHeight - 1), new Point2D_F64(x3, y3)));

	// Compute the homography
	DenseMatrix64F H = new DenseMatrix64F(3, 3);
	computeHomography.process(associatedPairs, H);

	// Create the transform for distorting the image
	PointTransformHomography_F32 homography = new PointTransformHomography_F32(H);
	PixelTransform2_F32 pixelTransform = new PointToPixelTransform_F32(homography);

	// Apply distortion and show the results
	DistortImageOps.distortSingle(image, output, pixelTransform, InterpolationType.BILINEAR, BorderType.SKIP);

	return new SimpleGray(output);
}
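A hedged usage sketch of removePerspective: the helper name rectify, the output size, and the corner coordinates are illustrative assumptions; they only show that the four source points are listed clockwise, starting from the corner that maps to the output's (0,0).

public static SimpleGray rectify( SimpleGray input ) {
	// corners of the region to rectify, in clockwise order in the source image
	return input.removePerspective(400, 300,
			100, 80,     // maps to the output's top-left corner (0,0)
			520, 60,     // top-right
			540, 400,    // bottom-right
			90, 420);    // bottom-left
}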
Example 12: createImages
import boofcv.core.image.border.BorderType; // import the required package/class
/**
 * Load and generate images
 */
public void createImages() {
	image = UtilImageIO.loadImage("../data/evaluation/standard/barbara.png");
	gray = ConvertBufferedImage.convertFromSingle(image, null, ImageUInt8.class);

	derivX = GeneralizedImageOps.createSingleBand(ImageSInt16.class, gray.getWidth(), gray.getHeight());
	derivY = GeneralizedImageOps.createSingleBand(ImageSInt16.class, gray.getWidth(), gray.getHeight());

	GImageDerivativeOps.sobel(gray, derivX, derivY, BorderType.EXTENDED);
}
Example 13: HomographyBilinear_F32
import boofcv.core.image.border.BorderType; // import the required package/class
public HomographyBilinear_F32(Homography2D_F32 affine) {
	PixelTransform_F32 tran = new PixelTransformHomography_F32(affine);
	InterpolatePixel<T> interp = FactoryInterpolation.bilinearPixel(imageType);
	ImageBorder<T> border = FactoryImageBorder.general(imageType, BorderType.EXTENDED);

	alg = FactoryDistort.distort(interp, border, imageType);
	alg.setModel(tran);
}
Example 14: HomographyBilinearCrop_F32
import boofcv.core.image.border.BorderType; // import the required package/class
public HomographyBilinearCrop_F32(Homography2D_F32 affine) {
	PixelTransform_F32 tran = new PixelTransformHomography_F32(affine);
	InterpolatePixel<T> interp = FactoryInterpolation.bilinearPixel(imageType);
	ImageBorder<T> border = FactoryImageBorder.general(imageType, BorderType.EXTENDED);

	alg = FactoryDistort.distort(interp, border, imageType);
	alg.setModel(tran);
}
Example 15: MapBilinear_F32
import boofcv.core.image.border.BorderType; // import the required package/class
public MapBilinear_F32( Homography2D_F32 homography ) {
	PixelTransform_F32 tran = new PixelTransformHomography_F32(homography);
	InterpolatePixel<T> interp = FactoryInterpolation.bilinearPixel(imageType);
	ImageBorder<T> border = FactoryImageBorder.general(imageType, BorderType.EXTENDED);

	alg = FactoryDistort.distortCached(interp, border, imageType);
	alg.setModel(tran);
}
}