Java Mat.channels Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.opencv.core.Mat.channels. If you are looking for concrete answers to questions such as how Mat.channels is used and what real code that calls it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, org.opencv.core.Mat.


The sections below present 14 code examples of the Mat.channels method, ordered by popularity.
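
Before diving into the examples, a minimal sketch of the method itself: Mat.channels() returns the number of channels in the matrix (1 for grayscale, 3 for BGR color, 4 for BGRA). The snippet below is illustrative only; it assumes the OpenCV native library has already been loaded and uses a hypothetical image path.

import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class ChannelsDemo {
	public static void main(String[] args) {
		// assumes System.loadLibrary(org.opencv.core.Core.NATIVE_LIBRARY_NAME) was called beforehand
		Mat img = Imgcodecs.imread("input.jpg"); // hypothetical path; imread returns a 3-channel BGR Mat by default
		System.out.println("channels = " + img.channels());
		if (img.channels() == 3) {
			Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2GRAY); // img.channels() is now 1
		}
	}
}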

Example 1: SimplestColorBalance

import org.opencv.core.Mat; // import the package/class that the method depends on
/**
 * Simplest Color Balance. Performs color balancing via histogram
 * normalization.
 *
 * @param img input color or gray scale image
 * @param percent controls the percentage of pixels to clip to white and black. (normally, choose 1~10)
 * @return Balanced image in CvType.CV_32F
 */
public static Mat SimplestColorBalance(Mat img, int percent) {
	if (percent <= 0)
		percent = 5;
	img.convertTo(img, CvType.CV_32F);
	List<Mat> channels = new ArrayList<>();
	int rows = img.rows(); // number of rows of image
	int cols = img.cols(); // number of columns of image
	int chnls = img.channels(); //  number of channels of image
	double halfPercent = percent / 200.0;
	if (chnls == 3) Core.split(img, channels);
	else channels.add(img);
	List<Mat> results = new ArrayList<>();
	for (int i = 0; i < chnls; i++) {
		// find the low and high percentile values (based on the input percent)
		Mat flat = new Mat();
		channels.get(i).reshape(1, 1).copyTo(flat);
		Core.sort(flat, flat, Core.SORT_ASCENDING);
		double lowVal = flat.get(0, (int) Math.floor(flat.cols() * halfPercent))[0];
		double topVal = flat.get(0, (int) Math.ceil(flat.cols() * (1.0 - halfPercent)))[0];
		// saturate below the low percentile and above the high percentile
		Mat channel = channels.get(i);
		for (int m = 0; m < rows; m++) {
			for (int n = 0; n < cols; n++) {
				if (channel.get(m, n)[0] < lowVal) channel.put(m, n, lowVal);
				if (channel.get(m, n)[0] > topVal) channel.put(m, n, topVal);
			}
		}
		Core.normalize(channel, channel, 0.0, 255.0 / 2, Core.NORM_MINMAX);
		channel.convertTo(channel, CvType.CV_32F);
		results.add(channel);
	}
	Mat outval = new Mat();
	Core.merge(results, outval);
	return outval;
}
 
Developer: IsaacChanghau, Project: OptimizedImageEnhance, Lines: 44, Source: Filters.java
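
A usage sketch for the method above (Filters is the containing class named in the source listing; the file names and the usual org.opencv imports are assumed). Since the result is returned as CV_32F, it is converted back to 8-bit before saving.

Mat src = Imgcodecs.imread("underwater.jpg");        // hypothetical input image
Mat balanced = Filters.SimplestColorBalance(src, 5); // percent=5 clips about 2.5% at each end of the histogram
balanced.convertTo(balanced, CvType.CV_8UC3);        // CV_32F -> 8-bit for writing to disk
Imgcodecs.imwrite("balanced.jpg", balanced);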

Example 2: matToBufferedImage

import org.opencv.core.Mat; // import the package/class that the method depends on
public static Image matToBufferedImage(Mat m)
{
    // a simple converter adapted from the web; the direct byte-copy approach is one of the fastest
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if ( m.channels() > 1 ) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels()*m.cols()*m.rows();
    byte [] b = new byte[bufferSize];
    m.get(0,0,b);
    BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;
    
}
 
Developer: Plasmoxy, Project: AquamarineLake, Lines: 17, Source: CVUtility.java
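
A usage sketch showing the typical Swing display path for this converter (the javax.swing and java.awt.Image imports, file name, and frame title are illustrative):

Mat frame = Imgcodecs.imread("frame.jpg");           // hypothetical input
Image preview = CVUtility.matToBufferedImage(frame);
JFrame window = new JFrame("Preview");               // javax.swing.JFrame
window.add(new JLabel(new ImageIcon(preview)));      // wrap the Image for display
window.pack();
window.setVisible(true);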

Example 3: hsvToBgrScalar

import org.opencv.core.Mat; // import the package/class that the method depends on
public static Scalar hsvToBgrScalar(Scalar hsvin)
{
    Mat bgr = new Mat();
    Mat hsv = new Mat(1, 1, CvType.CV_8UC3, hsvin);
    Imgproc.cvtColor(hsv, bgr, Imgproc.COLOR_HSV2BGR);
    byte[] bgr_data = new byte[(int)(bgr.total() * bgr.channels())];
    bgr.get(0,0, bgr_data);
    return new Scalar(bgr_data[0] & 0xFF, bgr_data[1] & 0xFF, bgr_data[2] & 0xFF); // mask so the signed bytes read as 0-255
}
 
Developer: Plasmoxy, Project: AquamarineLake, Lines: 10, Source: CVUtility.java
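
A short sketch of how such a conversion is typically used (the values and variable names are illustrative): pick a threshold colour in HSV space, then convert it to BGR so it can be passed to a drawing routine.

Scalar hsvGreen = new Scalar(60, 255, 255);          // OpenCV hue ranges over 0-179
Scalar bgrGreen = CVUtility.hsvToBgrScalar(hsvGreen);
Mat canvas = Mat.zeros(240, 320, CvType.CV_8UC3);
Imgproc.rectangle(canvas, new Point(40, 40), new Point(280, 200), bgrGreen, 2);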

Example 4: getOpenCvLines

import org.opencv.core.Mat; // import the package/class that the method depends on
public static List<Line> getOpenCvLines(Mat original, int scale, double minLength) {
    Mat raw = new Mat();
    Imgproc.resize(original.clone(), raw, new Size((int) (original.size().width/scale), (int) (original.size().height/scale)));
    if(raw.channels() > 1) {
        Imgproc.cvtColor(raw, raw, Imgproc.COLOR_RGB2GRAY);
    }
    Imgproc.equalizeHist(raw, raw);
    Imgproc.blur(raw, raw, new Size(3,3));
    //Line Segment Detection 2
    Mat linesM1 = new Mat();
    //LineSegmentDetector detector = Imgproc.createLineSegmentDetector(Imgproc.LSD_REFINE_ADV, 0.6, 0.3, 2.6, 22.5, 0, 0.3,256);
    //LineSegmentDetector detector = Imgproc.createLineSegmentDetector(Imgproc.LSD_REFINE_STD, 0.5, 0.4,2.0, 19.5, 0, 0.6, 32);
    //Reference for final glyph detection
    // 'detector' is a LineSegmentDetector field initialized elsewhere in the Lines class
    // (for example with one of the createLineSegmentDetector calls shown above).
    detector.detect(raw, linesM1);
    ArrayList<Line> lines = new ArrayList<Line>();
    for (int x = 0; x < linesM1.rows(); x++)  {
        double[] vec = linesM1.get(x, 0);
        Point start = new Point(vec[0],vec[1]);
        Point end = new Point(vec[2], vec[3]);
        Line line = new Line(start, end);
        line = new Line(new Point((int)line.x1*scale, (int) line.y1*scale), new Point((int)line.x2*scale, (int)line.y2*scale));
        if(line.length() > minLength) lines.add(line);
    }

    raw.release();
    linesM1.release();

    return lines;
}
 
Developer: GTHSRobotics, Project: DogeCV, Lines: 31, Source: Lines.java
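
A usage sketch (Lines is the containing class from the listing; the Line class and the detector field also come from the DogeCV project, and the input path is a placeholder): downscale by a factor of 4, keep only segments longer than 30 px, then draw them back onto the original image.

Mat photo = Imgcodecs.imread("glyphs.jpg");          // hypothetical input
List<Line> lines = Lines.getOpenCvLines(photo, 4, 30);
for (Line l : lines) {
	Imgproc.line(photo, new Point(l.x1, l.y1), new Point(l.x2, l.y2), new Scalar(0, 255, 0), 2);
}
Imgcodecs.imwrite("glyphs_lines.jpg", photo);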

Example 5: Mat2BufferedImage

import org.opencv.core.Mat; // import the package/class that the method depends on
public static BufferedImage Mat2BufferedImage(Mat m) {
	//Method converts a Mat to a Buffered Image
	int type = BufferedImage.TYPE_BYTE_GRAY;
     if ( m.channels() > 1 ) {
         type = BufferedImage.TYPE_3BYTE_BGR;
     }
     int bufferSize = m.channels()*m.cols()*m.rows();
     byte [] b = new byte[bufferSize];
     m.get(0,0,b); // get all the pixels
     BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
     final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
     System.arraycopy(b, 0, targetPixels, 0, b.length);  
     return image;
}
 
Developer: TheoreticallyNick, Project: Face-Detection-and-Tracking, Lines: 15, Source: FaceTrackMain.java

Example 6: mat2image

import org.opencv.core.Mat; // import the package/class that the method depends on
public BufferedImage mat2image(Mat mat){
    if (mat.channels() != 3 && mat.channels() != 1) return null;
    nByte = mat.channels(); // nByte, data and image are fields of ImageConverter, defined elsewhere in the class
    allocateSpace(mat);
    mat.get(0, 0, data);
    image.getRaster().setDataElements(0, 0, mat.cols(),mat.rows(), data);
    return image;
}
 
Developer: TheoreticallyNick, Project: Face-Detection-and-Tracking, Lines: 9, Source: ImageConverter.java

Example 7: blkEstimateEachChannel

import org.opencv.core.Mat; // import the package/class that the method depends on
public static double blkEstimateEachChannel(Mat blkIm, double airlight, double lambda, double fTrans) {
	double Trans = 0.0;
	double nTrans = Math.floor(1.0 / fTrans * 128);
	double fMinCost = Double.MAX_VALUE;
	int numberOfPixels = blkIm.rows() * blkIm.cols() * blkIm.channels();
	int nCounter = 0;
	// try candidate transmission values from fTrans up to about 1.0, in steps of 0.1
	while (nCounter < (int) ((1 - fTrans) * 10)) {
		// initial dehazing process to calculate the loss information
		Mat channel = blkIm.clone();
		channel = preDehaze(channel, airlight, nTrans);
		// find the pixels with over-255 value and below-0 value, and
		// calculate the sum of information loss
		double nSumOfLoss = 0.0;
		for (int i = 0; i < channel.rows(); i++) {
			for (int j = 0; j < channel.cols(); j++) {
				if (channel.get(i, j)[0] > 255.0) nSumOfLoss += (channel.get(i, j)[0] - 255.0) * (channel.get(i, j)[0] - 255.0);
				else if (channel.get(i, j)[0] < 0.0) nSumOfLoss += channel.get(i, j)[0] * channel.get(i, j)[0];
			}
		}
		// calculate the value of sum of square out
		double nSumOfSquareOuts = Core.sumElems(channel.mul(channel)).val[0];
		// calculate the value of sum of out
		double nSumOfOuts = Core.sumElems(channel).val[0];
		// calculate the mean value of the block image
		double fMean = nSumOfOuts / numberOfPixels;
		// calculate the cost function
		double fCost = lambda * nSumOfLoss / numberOfPixels - (nSumOfSquareOuts / numberOfPixels - fMean * fMean);
		// find the minimum cost and the related transmission
		if (nCounter == 0 || fMinCost > fCost) {
			fMinCost = fCost;
			Trans = fTrans;
		}
		fTrans = fTrans + 0.1;
		nTrans = 1.0 / fTrans * 128;
		nCounter = nCounter + 1;
	}
	return Trans;
}
 
Developer: IsaacChanghau, Project: OptimizedImageEnhance, Lines: 39, Source: BlkTransEstimate.java
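
A hedged calling sketch (BlkTransEstimate is the containing class from the listing; the block size, airlight, lambda and initial fTrans values are illustrative, and preDehaze lives elsewhere in that class): estimate the transmission for one block of a single image channel.

Mat channelImg = Imgcodecs.imread("hazy.jpg", Imgcodecs.IMREAD_GRAYSCALE); // hypothetical single-channel input
channelImg.convertTo(channelImg, CvType.CV_32F);
Mat blk = channelImg.submat(new Rect(0, 0, 32, 32));                        // one 32x32 block
double trans = BlkTransEstimate.blkEstimateEachChannel(blk, 220.0, 5.0, 0.3);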

Example 8: toBufferedImage

import org.opencv.core.Mat; // import the package/class that the method depends on
public BufferedImage toBufferedImage(Mat m) {
	int type = BufferedImage.TYPE_BYTE_GRAY;
	if (m.channels() > 1) type = BufferedImage.TYPE_3BYTE_BGR;
	int bufferSize = m.channels() * m.cols() * m.rows();
	byte[] b = new byte[bufferSize];
	m.get(0, 0, b); // get all the pixels
	BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
	final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
	System.arraycopy(b, 0, targetPixels, 0, b.length);
	return image;
}
 
Developer: IsaacChanghau, Project: OptimizedImageEnhance, Lines: 12, Source: ImShow.java

Example 9: mat2BI

import org.opencv.core.Mat; // import the package/class that the method depends on
public static BufferedImage mat2BI(Mat matrix) {
	final int cols = matrix.cols();
	final int rows = matrix.rows();
	final int elemSize = (int) matrix.elemSize();
	final byte[] data = new byte[cols * rows * elemSize];
	int type;
	matrix.get(0, 0, data);
	switch (matrix.channels()) {
	case 1:
		type = BufferedImage.TYPE_BYTE_GRAY;
		break;
	case 3:
		type = BufferedImage.TYPE_3BYTE_BGR;
		// bgr to rgb
		byte b;
		for (int i = 0; i < data.length; i = i + 3) {
			b = data[i];
			data[i] = data[i + 2];
			data[i + 2] = b;
		}
		break;
	default:
		return null;
	}
	final BufferedImage image2 = new BufferedImage(cols, rows, type);
	image2.getRaster().setDataElements(0, 0, cols, rows, data);
	return image2;
}
 
Developer: zylo117, Project: SpotSpotter, Lines: 29, Source: Mat2BufferedImage.java

Example 10: covertImage2Gray

import org.opencv.core.Mat; // import the package/class that the method depends on
/**
 * Converts the source image to a grayscale image.
 * @param srcImg the input image (1, 3 or 4 channels)
 * @return the grayscale image
 */
public static Mat covertImage2Gray(Mat srcImg) {
	Mat gray = new Mat(); // holds the grayscale image
	if (srcImg.channels() == 3) {
		Imgproc.cvtColor(srcImg, gray, Imgproc.COLOR_BGR2GRAY);
	} else if (srcImg.channels() == 4) {
		Imgproc.cvtColor(srcImg, gray, Imgproc.COLOR_BGRA2GRAY);
	} else {
		gray = srcImg;
	}
	return gray;
}
 
Developer: IaHehe, Project: classchecks, Lines: 17, Source: ImgprocessUtils.java
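
A brief usage sketch (the file name is a placeholder): the helper handles 3-channel BGR, 4-channel BGRA and already-gray inputs uniformly.

Mat color = Imgcodecs.imread("face.jpg", Imgcodecs.IMREAD_UNCHANGED); // may load 1, 3 or 4 channels
Mat gray = ImgprocessUtils.covertImage2Gray(color);
System.out.println("channels: " + color.channels() + " -> " + gray.channels());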

Example 11: matToBufferedImage

import org.opencv.core.Mat; // import the package/class that the method depends on
public static BufferedImage matToBufferedImage(Mat matrix) {
    int cols = matrix.cols();
    int rows = matrix.rows();
    int elemSize = (int) matrix.elemSize();
    byte[] data = new byte[cols * rows * elemSize];
    int type;
    matrix.get(0, 0, data);
    switch (matrix.channels()) {
        case 1:
            type = BufferedImage.TYPE_BYTE_GRAY;
            break;
        case 3:
            type = BufferedImage.TYPE_3BYTE_BGR;
            // bgr to rgb  
            byte b;
            for (int i = 0; i < data.length; i = i + 3) {
                b = data[i];
                data[i] = data[i + 2];
                data[i + 2] = b;
            }
            break;
        default:
            return null;
    }
    BufferedImage bimg = new BufferedImage(cols, rows, type);
    bimg.getRaster().setDataElements(0, 0, cols, rows, data);
    return bimg;
}
 
Developer: lupino22, Project: kronometer, Lines: 29, Source: Utility.java

Example 12: enhancedMatToBufferedImage

import org.opencv.core.Mat; // import the package/class that the method depends on
public Image enhancedMatToBufferedImage(Mat m){
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if ( m.channels() > 1 ) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels()*m.cols()*m.rows();
    byte [] b = new byte[bufferSize];
    m.get(0,0,b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;

}
 
Developer: Plasmoxy, Project: AquamarineLake, Lines: 15, Source: DrawPanel.java

Example 13: unevenLightCompensate

import org.opencv.core.Mat; // import the package/class that the method depends on
/**
 * The main idea is:
 *  1. Compute the average gray level of the source image I, and record its rows and cols.
 *  2. Divide the image into N*M blocks of a given size and compute each block's mean,
 *     giving a block brightness matrix D.
 *  3. Subtract the source image's average gray level from every element of D, giving the
 *     block brightness difference matrix E.
 *  4. Resize E with bicubic interpolation to the size of the source image, giving the
 *     brightness distribution matrix R.
 *  5. The corrected image is result = I - R.
 * @Title: unevenLightCompensate
 * @Description: compensation for uneven lighting
 * @param image the image to correct (modified in place)
 * @param blockSize the block edge length in pixels
 */
public static void unevenLightCompensate(Mat image, int blockSize) {
	if(image.channels() == 3) {
		Imgproc.cvtColor(image, image, 7); // the constant 7 is Imgproc.COLOR_RGB2GRAY
	}
	double average = Core.mean(image).val[0];
	Scalar scalar = new Scalar(average);
	int rowsNew = (int) Math.ceil((double)image.rows() / (double)blockSize);
	int colsNew = (int) Math.ceil((double)image.cols() / (double)blockSize);
	Mat blockImage = new Mat();
	blockImage = Mat.zeros(rowsNew, colsNew, CvType.CV_32FC1);
	for(int i = 0; i < rowsNew; i ++) {
		for(int j = 0; j < colsNew; j ++) {
			int rowmin = i * blockSize;
			int rowmax = (i + 1) * blockSize;
			if(rowmax > image.rows()) rowmax = image.rows();
			int colmin = j * blockSize;
			int colmax = (j +1) * blockSize;
			if(colmax > image.cols()) colmax = image.cols();
			Range rangeRow = new Range(rowmin, rowmax);
			Range rangeCol = new Range(colmin, colmax);
			Mat imageROI = new Mat(image, rangeRow, rangeCol);
			double temaver = Core.mean(imageROI).val[0];
			blockImage.put(i, j, temaver);
		}
	}
	
	Core.subtract(blockImage, scalar, blockImage);
	Mat blockImage2 = new Mat();
	int INTER_CUBIC = 2; // 2 is Imgproc.INTER_CUBIC (bicubic interpolation)
	Imgproc.resize(blockImage, blockImage2, image.size(), 0, 0, INTER_CUBIC);
	Mat image2 = new Mat();
	image.convertTo(image2, CvType.CV_32FC1);
	Mat dst = new Mat();
	Core.subtract(image2, blockImage2, dst);
	dst.convertTo(image, CvType.CV_8UC1);
}
 
Developer: IaHehe, Project: classchecks, Lines: 51, Source: ImgprocessUtils.java
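
A usage sketch for the compensation above (a blockSize of 32 is illustrative; the file names are placeholders). Note that the method converts a colour input to grayscale and overwrites the argument in place.

Mat page = Imgcodecs.imread("scanned_page.jpg");     // hypothetical unevenly lit document photo
ImgprocessUtils.unevenLightCompensate(page, 32);     // page is now a light-corrected CV_8UC1 image
Imgcodecs.imwrite("compensated.png", page);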

Example 14: norm_0_255

import org.opencv.core.Mat; // import the package/class that the method depends on
/**
 * Image normalization.
 * @Title: norm_0_255
 * @Description: normalizes the source matrix into the 0-255 range
 * @param src the source matrix (1 or 3 channels)
 * @return the normalized Mat
 */
public static Mat norm_0_255(Mat src) {
	// create and return a normalized image matrix
	Mat dst = new Mat();
	switch(src.channels()) {
		case 1: Core.normalize(src, dst, 0, 255, Core.NORM_MINMAX, CvType.CV_8UC1); break;
		case 3: Core.normalize(src, dst, 0, 255, Core.NORM_MINMAX, CvType.CV_8UC3); break;
		default: src.copyTo(dst);break;
	}
	return dst;
}
 
Developer: IaHehe, Project: classchecks, Lines: 20, Source: ImgprocessUtils.java
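
A short usage sketch (the input matrix here is synthetic; in face-recognition code this kind of helper is typically applied to floating-point results before display): stretch the values into the displayable 0-255 range.

Mat response = new Mat(100, 100, CvType.CV_32FC1);   // hypothetical floating-point result
Core.randu(response, -1.0, 1.0);                     // fill with sample values
Mat displayable = ImgprocessUtils.norm_0_255(response);
Imgcodecs.imwrite("normalized.png", displayable);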


Note: The org.opencv.core.Mat.channels method examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.