

Java Mat.channels Method Code Examples

This article collects typical usage examples of the Java method org.opencv.core.Mat.channels. If you are wondering how Mat.channels is used in practice, or are looking for concrete examples of calling it, the hand-picked code samples below should help. You can also browse further usage examples of the enclosing class, org.opencv.core.Mat.


A total of 15 code examples of the Mat.channels method are shown below, sorted by popularity by default.
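
Before the collected examples, here is a minimal, self-contained sketch of what Mat.channels() reports for a few common Mat types. It assumes the OpenCV Java bindings are on the classpath and the native library can be loaded; the class name ChannelsDemo is just for illustration.

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class ChannelsDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME); // load the native OpenCV library

        Mat gray = new Mat(480, 640, CvType.CV_8UC1); // single-channel (grayscale) image
        Mat bgr  = new Mat(480, 640, CvType.CV_8UC3); // three-channel (BGR) image
        Mat bgra = new Mat(480, 640, CvType.CV_8UC4); // four-channel (BGRA) image

        System.out.println(gray.channels()); // prints 1
        System.out.println(bgr.channels());  // prints 3
        System.out.println(bgra.channels()); // prints 4
    }
}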

Example 1: SimplestColorBalance

import org.opencv.core.Mat; // import the package/class the method depends on
/**
 * Simplest Color Balance. Performs color balancing via histogram
 * normalization.
 *
 * @param img input color or gray scale image
 * @param percent controls the percentage of pixels to clip to white and black. (normally, choose 1~10)
 * @return Balanced image in CvType.CV_32F
 */
public static Mat SimplestColorBalance(Mat img, int percent) {
	if (percent <= 0)
		percent = 5;
	img.convertTo(img, CvType.CV_32F);
	List<Mat> channels = new ArrayList<>();
	int rows = img.rows(); // number of rows of image
	int cols = img.cols(); // number of columns of image
	int chnls = img.channels(); //  number of channels of image
	double halfPercent = percent / 200.0;
	if (chnls == 3) Core.split(img, channels);
	else channels.add(img);
	List<Mat> results = new ArrayList<>();
	for (int i = 0; i < chnls; i++) {
		// find the low and high percentile values (based on the input percentile)
		Mat flat = new Mat();
		channels.get(i).reshape(1, 1).copyTo(flat);
		Core.sort(flat, flat, Core.SORT_ASCENDING);
		double lowVal = flat.get(0, (int) Math.floor(flat.cols() * halfPercent))[0];
		double topVal = flat.get(0, (int) Math.ceil(flat.cols() * (1.0 - halfPercent)))[0];
		// saturate below the low percentile and above the high percentile
		Mat channel = channels.get(i);
		for (int m = 0; m < rows; m++) {
			for (int n = 0; n < cols; n++) {
				if (channel.get(m, n)[0] < lowVal) channel.put(m, n, lowVal);
				if (channel.get(m, n)[0] > topVal) channel.put(m, n, topVal);
			}
		}
		Core.normalize(channel, channel, 0.0, 255.0 / 2, Core.NORM_MINMAX);
		channel.convertTo(channel, CvType.CV_32F);
		results.add(channel);
	}
	Mat outval = new Mat();
	Core.merge(results, outval);
	return outval;
}
 
Developer: IsaacChanghau, Project: OptimizedImageEnhance, Lines: 44, Source: Filters.java
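
A possible way to call the method above, assuming the Filters class from the attributed project is on the classpath; the file paths and the percent value are illustrative placeholders only.

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

public class ColorBalanceDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat img = Imgcodecs.imread("input.jpg");             // placeholder path, loaded as 8-bit BGR
        Mat balanced = Filters.SimplestColorBalance(img, 5); // percent = 5 clips about 2.5% of pixels at each end of every channel
        Mat out = new Mat();
        balanced.convertTo(out, CvType.CV_8UC3);             // convert the CV_32F result back to 8-bit before writing
        Imgcodecs.imwrite("balanced.jpg", out);              // placeholder output path
    }
}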

Example 2: matToBufferedImage

import org.opencv.core.Mat; // import the package/class the method depends on
public static Image matToBufferedImage(Mat m)
{
    // a simple converter taken from the web; claimed to be the fastest approach
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if ( m.channels() > 1 ) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels()*m.cols()*m.rows();
    byte [] b = new byte[bufferSize];
    m.get(0,0,b);
    BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;
    
}
 
Developer: Plasmoxy, Project: AquamarineLake, Lines: 17, Source: CVUtility.java
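
A short sketch of exercising the converter above, assuming the CVUtility class from the attributed project is available; the paths are placeholders, and the returned Image is cast to BufferedImage so it can be written with ImageIO.

import java.awt.image.BufferedImage;
import java.io.File;
import javax.imageio.ImageIO;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

public class MatToImageDemo {
    public static void main(String[] args) throws Exception {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat m = Imgcodecs.imread("input.jpg");                                 // placeholder path, 8-bit BGR
        BufferedImage image = (BufferedImage) CVUtility.matToBufferedImage(m);
        ImageIO.write(image, "png", new File("output.png"));                   // placeholder output path
    }
}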

Example 3: hsvToBgrScalar

import org.opencv.core.Mat; // import the package/class the method depends on
public static Scalar hsvToBgrScalar(Scalar hsvin)
{
    Mat bgr = new Mat();
    Mat hsv = new Mat(1, 1, CvType.CV_8UC3, hsvin);
    Imgproc.cvtColor(hsv, bgr, Imgproc.COLOR_HSV2BGR);
    byte[] bgr_data = new byte[(int)(bgr.total() * bgr.channels())];
    bgr.get(0,0, bgr_data);
    return new Scalar(bgr_data[0] & 0xFF, bgr_data[1] & 0xFF, bgr_data[2] & 0xFF); // mask to unsigned: Java bytes are signed, channel values are 0-255
}
 
Developer: Plasmoxy, Project: AquamarineLake, Lines: 10, Source: CVUtility.java
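
A small illustrative call of the conversion above, assuming CVUtility is on the classpath; the HSV triple is an arbitrary example (OpenCV's 8-bit HSV uses a 0-179 hue range).

import org.opencv.core.Core;
import org.opencv.core.Scalar;

public class HsvToBgrDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Scalar hsvGreen = new Scalar(60, 255, 255);       // hue 60 is green on OpenCV's 0-179 hue scale
        Scalar bgr = CVUtility.hsvToBgrScalar(hsvGreen);
        System.out.println(bgr);                          // the BGR equivalent of the HSV input
    }
}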

Example 4: matToBufferedImage

import org.opencv.core.Mat; // import the package/class the method depends on
public static Image matToBufferedImage(Mat m)
{
    // a simple converter taken from the web; claimed to be the fastest approach
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if ( m.channels() > 1 ) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels()*m.cols()*m.rows();
    byte [] b = new byte[bufferSize];
    m.get(0,0,b);
    BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;

}
 
Developer: Plasmoxy, Project: AquamarineLake, Lines: 17, Source: CVUtility.java

Example 5: getOpenCvLines

import org.opencv.core.Mat; // import the package/class the method depends on
public static List<Line> getOpenCvLines(Mat original, int scale, double minLength) {
    Mat raw = new Mat();
    Imgproc.resize(original.clone(), raw, new Size((int) (original.size().width/scale), (int) (original.size().height/scale)));
    if(raw.channels() > 1) {
        Imgproc.cvtColor(raw, raw, Imgproc.COLOR_RGB2GRAY);
    }
    Imgproc.equalizeHist(raw, raw);
    Imgproc.blur(raw, raw, new Size(3,3));
    //Line Segment Detection 2
    Mat linesM1 = new Mat();
    //LineSegmentDetector detector = Imgproc.createLineSegmentDetector(Imgproc.LSD_REFINE_ADV, 0.6, 0.3, 2.6, 22.5, 0, 0.3,256);
    //LineSegmentDetector detector = Imgproc.createLineSegmentDetector(Imgproc.LSD_REFINE_STD, 0.5, 0.4,2.0, 19.5, 0, 0.6, 32);
    //Reference for final glyph detection
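    // NOTE: 'detector' is assumed to be a class-level LineSegmentDetector field initialized elsewhere,
    // for example via one of the commented-out Imgproc.createLineSegmentDetector(...) calls above.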

    detector.detect(raw, linesM1);
    ArrayList<Line> lines = new ArrayList<Line>();
    for (int x = 0; x < linesM1.rows(); x++)  {
        double[] vec = linesM1.get(x, 0);
        Point start = new Point(vec[0],vec[1]);
        Point end = new Point(vec[2], vec[3]);
        Line line = new Line(start, end);
        line = new Line(new Point((int)line.x1*scale, (int) line.y1*scale), new Point((int)line.x2*scale, (int)line.y2*scale));
        if(line.length() > minLength) lines.add(line);
    }

    raw.release();
    linesM1.release();

    return lines;
}
 
Developer: GTHSRobotics, Project: DogeCV, Lines: 31, Source: Lines.java
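
A hypothetical call of the method above, assuming the Lines class and its Line type from the attributed project are on the classpath and that the class-level detector field mentioned in the note above has been initialized; the path, scale and minimum length are illustrative values only.

import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
// Line and Lines come from the attributed DogeCV project; their imports are omitted here.

public class LinesDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat frame = Imgcodecs.imread("frame.jpg");             // placeholder path
        List<Line> lines = Lines.getOpenCvLines(frame, 2, 30); // downscale by 2, keep segments longer than 30 px
        System.out.println("detected " + lines.size() + " line segments");
    }
}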

Example 6: Mat2BufferedImage

import org.opencv.core.Mat; // import the package/class the method depends on
public static BufferedImage Mat2BufferedImage(Mat m) {
	//Method converts a Mat to a Buffered Image
	int type = BufferedImage.TYPE_BYTE_GRAY;
     if ( m.channels() > 1 ) {
         type = BufferedImage.TYPE_3BYTE_BGR;
     }
     int bufferSize = m.channels()*m.cols()*m.rows();
     byte [] b = new byte[bufferSize];
     m.get(0,0,b); // get all the pixels
     BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
     final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
     System.arraycopy(b, 0, targetPixels, 0, b.length);  
     return image;
}
 
Developer: TheoreticallyNick, Project: Face-Detection-and-Tracking, Lines: 15, Source: FaceTrackMain.java

Example 7: mat2image

import org.opencv.core.Mat; // import the package/class the method depends on
public BufferedImage mat2image(Mat mat){
    if (mat.channels()!=3  && mat.channels()!=1) return null;
    nByte = mat.channels();
    allocateSpace(mat);
    mat.get(0, 0, data);
    image.getRaster().setDataElements(0, 0, mat.cols(),mat.rows(), data);
    return image;
}
 
Developer: TheoreticallyNick, Project: Face-Detection-and-Tracking, Lines: 9, Source: ImageConverter.java

Example 8: blkEstimateEachChannel

import org.opencv.core.Mat; // import the package/class the method depends on
public static double blkEstimateEachChannel(Mat blkIm, double airlight, double lambda, double fTrans) {
	double Trans = 0.0;
	double nTrans = Math.floor(1.0 / fTrans * 128);
	double fMinCost = Double.MAX_VALUE;
	int numberOfPixels = blkIm.rows() * blkIm.cols() * blkIm.channels();
	int nCounter = 0;
	while (nCounter < (int) ((1 - fTrans) * 10)) { // search candidate transmission values in steps of 0.1
		// initial dehazing process to calculate the loss information
		Mat channel = blkIm.clone();
		channel = preDehaze(channel, airlight, nTrans);
		// find the pixels with over-255 value and below-0 value, and
		// calculate the sum of information loss
		double nSumOfLoss = 0.0;
		for (int i = 0; i < channel.rows(); i++) {
			for (int j = 0; j < channel.cols(); j++) {
				if (channel.get(i, j)[0] > 255.0) nSumOfLoss += (channel.get(i, j)[0] - 255.0) * (channel.get(i, j)[0] - 255.0);
				else if (channel.get(i, j)[0] < 0.0) nSumOfLoss += channel.get(i, j)[0] * channel.get(i, j)[0];
			}
		}
		// calculate the value of sum of square out
		double nSumOfSquareOuts = Core.sumElems(channel.mul(channel)).val[0];
		// calculate the value of sum of out
		double nSumOfOuts = Core.sumElems(channel).val[0];
		// calculate the mean value of the block image
		double fMean = nSumOfOuts / numberOfPixels;
		// calculate the cost function
		double fCost = lambda * nSumOfLoss / numberOfPixels - (nSumOfSquareOuts / numberOfPixels - fMean * fMean);
		// find the minimum cost and the related transmission
		if (nCounter == 0 || fMinCost > fCost) {
			fMinCost = fCost;
			Trans = fTrans;
		}
		fTrans = fTrans + 0.1;
		nTrans = 1.0 / fTrans * 128;
		nCounter = nCounter + 1;
	}
	return Trans;
}
 
Developer: IsaacChanghau, Project: OptimizedImageEnhance, Lines: 39, Source: BlkTransEstimate.java

Example 9: toBufferedImage

import org.opencv.core.Mat; // import the package/class the method depends on
public BufferedImage toBufferedImage(Mat m) {
	int type = BufferedImage.TYPE_BYTE_GRAY;
	if (m.channels() > 1) type = BufferedImage.TYPE_3BYTE_BGR;
	int bufferSize = m.channels() * m.cols() * m.rows();
	byte[] b = new byte[bufferSize];
	m.get(0, 0, b); // get all the pixels
	BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
	final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
	System.arraycopy(b, 0, targetPixels, 0, b.length);
	return image;
}
 
Developer: IsaacChanghau, Project: OptimizedImageEnhance, Lines: 12, Source: ImShow.java

Example 10: mat2BI

import org.opencv.core.Mat; // import the package/class the method depends on
public static BufferedImage mat2BI(Mat matrix) {
	final int cols = matrix.cols();
	final int rows = matrix.rows();
	final int elemSize = (int) matrix.elemSize();
	final byte[] data = new byte[cols * rows * elemSize];
	int type;
	matrix.get(0, 0, data);
	switch (matrix.channels()) {
	case 1:
		type = BufferedImage.TYPE_BYTE_GRAY;
		break;
	case 3:
		type = BufferedImage.TYPE_3BYTE_BGR;
		// bgr to rgb
		byte b;
		for (int i = 0; i < data.length; i = i + 3) {
			b = data[i];
			data[i] = data[i + 2];
			data[i + 2] = b;
		}
		break;
	default:
		return null;
	}
	final BufferedImage image2 = new BufferedImage(cols, rows, type);
	image2.getRaster().setDataElements(0, 0, cols, rows, data);
	return image2;
}
 
Developer: zylo117, Project: SpotSpotter, Lines: 29, Source: Mat2BufferedImage.java

Example 11: covertImage2Gray

import org.opencv.core.Mat; // import the package/class the method depends on
/**
 * Convert the source image to a grayscale image
 * @param srcImg
 * @return
 */
public static Mat covertImage2Gray(Mat srcImg) {
	Mat gray = new Mat(); // holds the grayscale result
	if (srcImg.channels() == 3) {
		Imgproc.cvtColor(srcImg, gray, Imgproc.COLOR_BGR2GRAY);
	} else if (srcImg.channels() == 4) {
		Imgproc.cvtColor(srcImg, gray, Imgproc.COLOR_BGRA2GRAY);
	} else {
		gray = srcImg;
	}
	return gray;
}
 
Developer: IaHehe, Project: classchecks, Lines: 17, Source: ImgprocessUtils.java
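
A brief sketch of the grayscale helper above in use, assuming the ImgprocessUtils class from the attributed project is available; the file paths are placeholders.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

public class GrayDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat src = Imgcodecs.imread("photo.jpg");          // placeholder path, typically 3-channel BGR
        Mat gray = ImgprocessUtils.covertImage2Gray(src); // returns a single-channel image
        Imgcodecs.imwrite("photo-gray.jpg", gray);        // placeholder output path
    }
}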

Example 12: matToBufferedImage

import org.opencv.core.Mat; // import the package/class the method depends on
public static BufferedImage matToBufferedImage(Mat matrix) {
    int cols = matrix.cols();
    int rows = matrix.rows();
    int elemSize = (int) matrix.elemSize();
    byte[] data = new byte[cols * rows * elemSize];
    int type;
    matrix.get(0, 0, data);
    switch (matrix.channels()) {
        case 1:
            type = BufferedImage.TYPE_BYTE_GRAY;
            break;
        case 3:
            type = BufferedImage.TYPE_3BYTE_BGR;
            // bgr to rgb  
            byte b;
            for (int i = 0; i < data.length; i = i + 3) {
                b = data[i];
                data[i] = data[i + 2];
                data[i + 2] = b;
            }
            break;
        default:
            return null;
    }
    BufferedImage bimg = new BufferedImage(cols, rows, type);
    bimg.getRaster().setDataElements(0, 0, cols, rows, data);
    return bimg;
}
 
Developer: lupino22, Project: kronometer, Lines: 29, Source: Utility.java

Example 13: enhancedMatToBufferedImage

import org.opencv.core.Mat; // import the package/class the method depends on
public Image enhancedMatToBufferedImage(Mat m){
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if ( m.channels() > 1 ) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels()*m.cols()*m.rows();
    byte [] b = new byte[bufferSize];
    m.get(0,0,b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;

}
 
開發者ID:Plasmoxy,項目名稱:AquamarineLake,代碼行數:15,代碼來源:DrawPanel.java

Example 14: unevenLightCompensate

import org.opencv.core.Mat; // import the package/class the method depends on
/**
 * Main idea:
 * 1. Compute the mean gray level of the source image I and record its rows and cols;
 * 2. Split the image into N*M blocks of a given size and compute each block's mean, giving a block brightness matrix D;
 * 3. Subtract the source image's mean gray level from every element of D, giving a block brightness difference matrix E;
 * 4. Interpolate E with bicubic interpolation up to the size of the source image, giving a brightness distribution matrix R;
 * 5. The corrected image is result = I - R.
 * @Title: unevenLightCompensate
 * @Description: uneven-lighting (illumination) compensation
 * @param image
 * @param blockSize
 * void
 * @throws
 */
public static void unevenLightCompensate(Mat image, int blockSize) {
	if(image.channels() == 3) {
		Imgproc.cvtColor(image, image, 7); // 7 is the OpenCV color-conversion code COLOR_RGB2GRAY (collapse to one gray channel)
	}
	double average = Core.mean(image).val[0];
	Scalar scalar = new Scalar(average);
	int rowsNew = (int) Math.ceil((double)image.rows() / (double)blockSize);
	int colsNew = (int) Math.ceil((double)image.cols() / (double)blockSize);
	Mat blockImage = new Mat();
	blockImage = Mat.zeros(rowsNew, colsNew, CvType.CV_32FC1);
	for(int i = 0; i < rowsNew; i ++) {
		for(int j = 0; j < colsNew; j ++) {
			int rowmin = i * blockSize;
			int rowmax = (i + 1) * blockSize;
			if(rowmax > image.rows()) rowmax = image.rows();
			int colmin = j * blockSize;
			int colmax = (j +1) * blockSize;
			if(colmax > image.cols()) colmax = image.cols();
			Range rangeRow = new Range(rowmin, rowmax);
			Range rangeCol = new Range(colmin, colmax);
			Mat imageROI = new Mat(image, rangeRow, rangeCol);
			double temaver = Core.mean(imageROI).val[0];
			blockImage.put(i, j, temaver);
		}
	}
	
	Core.subtract(blockImage, scalar, blockImage);
	Mat blockImage2 = new Mat();
	int INTER_CUBIC = 2; // 2 corresponds to Imgproc.INTER_CUBIC (bicubic interpolation)
	Imgproc.resize(blockImage, blockImage2, image.size(), 0, 0, INTER_CUBIC);
	Mat image2 = new Mat();
	image.convertTo(image2, CvType.CV_32FC1);
	Mat dst = new Mat();
	Core.subtract(image2, blockImage2, dst);
	dst.convertTo(image, CvType.CV_8UC1);
}
 
Developer: IaHehe, Project: classchecks, Lines: 51, Source: ImgprocessUtils.java
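
A minimal sketch of applying the compensation above, assuming ImgprocessUtils from the attributed project is on the classpath; the block size and paths are illustrative. Note that the method converts a 3-channel input to grayscale and overwrites it in place with the corrected 8-bit result.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

public class LightCompensateDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat image = Imgcodecs.imread("document.jpg");     // placeholder path
        ImgprocessUtils.unevenLightCompensate(image, 32); // 32x32 blocks; the result replaces 'image'
        Imgcodecs.imwrite("document-evened.jpg", image);  // placeholder output path
    }
}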

Example 15: norm_0_255

import org.opencv.core.Mat; // import the package/class the method depends on
/**
 * Normalize an image to the 0-255 range
* @Title: norm_0_255
* @Description: TODO (describe this method's purpose in one sentence)
* @param src
* @return
* Mat 
* @throws
 */
public static Mat norm_0_255(Mat src) {
	// create and return a normalized image matrix
	Mat dst = new Mat();
	switch(src.channels()) {
		case 1: Core.normalize(src, dst, 0, 255, Core.NORM_MINMAX, CvType.CV_8UC1); break;
		case 3: Core.normalize(src, dst, 0, 255, Core.NORM_MINMAX, CvType.CV_8UC3); break;
		default: src.copyTo(dst);break;
	}
	return dst;
}
 
Developer: IaHehe, Project: classchecks, Lines: 20, Source: ImgprocessUtils.java
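
A brief sketch of the normalization helper above in use, again assuming ImgprocessUtils is available; the paths are placeholders.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

public class NormalizeDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat src = Imgcodecs.imread("face.jpg", Imgcodecs.IMREAD_GRAYSCALE); // placeholder path, single channel
        Mat normalized = ImgprocessUtils.norm_0_255(src);                   // stretched to the full 0-255 range
        Imgcodecs.imwrite("face-normalized.jpg", normalized);               // placeholder output path
    }
}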


Note: The org.opencv.core.Mat.channels examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce this article without permission.