This article collects typical usage examples of the Java method org.opencv.core.Mat.width. If you are wondering what Mat.width does, how to call it, or want to see it used in real code, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.opencv.core.Mat.
The following sections show 15 code examples of Mat.width, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
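Before the examples, here is a minimal orientation sketch (the file name and print statements are illustrative and not taken from any of the projects below) showing that Mat.width() returns the number of columns and Mat.height() the number of rows of a loaded image:

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

public class MatWidthDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME); // load the native OpenCV library
        Mat img = Imgcodecs.imread("input.jpg");      // hypothetical input file
        System.out.println("width:  " + img.width());  // number of columns (same as cols())
        System.out.println("height: " + img.height()); // number of rows (same as rows())
    }
}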
Example 1: rotate
import org.opencv.core.Mat; // import the package/class the method depends on
/**
* Rotate an image by an angle (counterclockwise)
*
* @param image Image matrix to rotate in place
* @param angle Angle to rotate by (counterclockwise) from -360 to 360
*/
public static void rotate(Mat image, double angle) {
//Calculate size of new matrix
double radians = Math.toRadians(angle);
double sin = Math.abs(Math.sin(radians));
double cos = Math.abs(Math.cos(radians));
int newWidth = (int) (image.width() * cos + image.height() * sin);
int newHeight = (int) (image.width() * sin + image.height() * cos);
// rotating image
Point center = new Point(image.width() / 2.0, image.height() / 2.0);
Mat rotMatrix = Imgproc.getRotationMatrix2D(center, angle, 1.0); // 1.0 means 100% scale
// shift the rotation so the result is centered in the enlarged canvas
rotMatrix.put(0, 2, rotMatrix.get(0, 2)[0] + (newWidth - image.width()) / 2.0);
rotMatrix.put(1, 2, rotMatrix.get(1, 2)[0] + (newHeight - image.height()) / 2.0);
Size size = new Size(newWidth, newHeight);
Imgproc.warpAffine(image, image, rotMatrix, size);
}
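A brief usage sketch for the method above (file names are illustrative): load an image, rotate it 30 degrees counterclockwise in place, and save the result.

Mat photo = Imgcodecs.imread("photo.jpg"); // hypothetical input
rotate(photo, 30.0);                       // the Mat is modified in place
Imgcodecs.imwrite("photo_rotated.jpg", photo);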
Example 2: imagePadding
import org.opencv.core.Mat; // import the package/class the method depends on
/**
* Pad the image on the right and bottom so that both dimensions become a multiple of blockSize.
*/
private Mat imagePadding(Mat source, int blockSize) {
int width = source.width();
int height = source.height();
int bottomPadding = 0;
int rightPadding = 0;
if (width % blockSize != 0) {
rightPadding = blockSize - (width % blockSize); // extra columns on the right
}
if (height % blockSize != 0) {
bottomPadding = blockSize - (height % blockSize); // extra rows on the bottom
}
// copyMakeBorder(src, dst, top, bottom, left, right, borderType, value)
Core.copyMakeBorder(source, source, 0, bottomPadding, 0, rightPadding, Core.BORDER_CONSTANT, Scalar.all(0));
return source;
}
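A short usage sketch (the block size of 16 and the file name are illustrative) verifying that both dimensions are multiples of the block size after padding:

Mat src = Imgcodecs.imread("fingerprint.png"); // hypothetical input
Mat padded = imagePadding(src, 16);
System.out.println(padded.width() % 16);  // prints 0
System.out.println(padded.height() % 16); // prints 0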
Example 3: gerarCinza
import org.opencv.core.Mat; // import the package/class the method depends on
public void gerarCinza(Mat original, Mat cinza) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
int width = original.width(); // width
int height = original.height(); // height
// System.out.println(width+"/"+height);
for (int w = 0; w < width; w++) {
for (int h = 0; h < height; h++) {
// System.out.println(w+"/"+h);
double[] rgb = original.get(h, w); // read from the source image rather than the destination
double cor = ((rgb[0] + rgb[1] + rgb[2]) / 3); // simple average of the B, G and R channels
double[] tom = { cor, cor, cor };
cinza.put(h, w, tom);
}
}
salvarImagem("paisagemcinza.png", cinza);
}
Example 4: gerarBinarizacao
import org.opencv.core.Mat; // import the package/class the method depends on
public void gerarBinarizacao(Mat cinza, Mat binaria) {
int width = cinza.width();
int height = cinza.height();
for (int w = 0; w < width; w++) {
for (int h = 0; h < height; h++) {
double cor = 0;
double[] pixel = cinza.get(h, w);
if (pixel[0] > 100)
cor = 255;
double[] tom = { cor, cor, cor };
binaria.put(h, w, tom);
}
}
salvarImagem("paisagembinaria.png", binaria);
}
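A hedged driver sketch for Examples 3 and 4 (the input file name is illustrative; the destination Mats are pre-allocated by cloning so they already have the right size and type):

Mat original = Imgcodecs.imread("paisagem.png"); // hypothetical input
Mat cinza = original.clone();   // destination for the grayscale result
Mat binaria = original.clone(); // destination for the binarized result
gerarCinza(original, cinza);
gerarBinarizacao(cinza, binaria);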
Example 5: logicaE
import org.opencv.core.Mat; // import the package/class the method depends on
public void logicaE (String output){
manipulacao m = new manipulacao();
Mat e = m.copiarImagem(imagem1);
int height = e.height();
int width = e.width();
for (int h = 0; h < height; h++) {
for (int w = 0; w < width; w++) {
double[] rgb1 = imagem1.get(h, w);
double[] rgb2 = imagem2.get(h, w);
double[] branco = {255,255,255};
e.put(h, w, branco);
if(rgb1[0] == 0 && rgb2[0] == 0){
double[] preto = {0,0,0};
e.put(h, w, preto);
}
}
}
m.salvarImagem(output, e);
}
Example 6: complemento
import org.opencv.core.Mat; // import the package/class the method depends on
public void complemento (String output){
manipulacao m = new manipulacao();
Mat com = m.copiarImagem(imagem2);
int height = com.height();
int width = com.width();
for (int h = 0; h < height; h++) {
for (int w = 0; w < width; w++) {
double[] rgb = imagem2.get(h, w);
double[] branco = {255,255,255};
double[] preto = {0,0,0};
if(rgb[0] == 0){
com.put(h, w, branco);
}else {
com.put(h, w, preto);
}
}
}
m.salvarImagem(output, com);
}
Example 7: processStillImage
import org.opencv.core.Mat; // import the package/class the method depends on
private void processStillImage() {
Mat mat = Imgcodecs.imread("fly.bmp");
for (int c = 0; c < mat.width() / 2; c++) {
for (int r = 0; r < mat.height() / 2; r++) {
double color[] = mat.get(r, c);
color[0] = 255; // channel 0 is blue, since imread loads images in BGR order
mat.put(r, c, color);
//System.out.printf("(%d, %d) = %s\n", r, c, Arrays.toString(color));
}
}
Imgcodecs.imwrite("fly_new.bmp", mat);
Mat gray = new Mat();
Imgproc.cvtColor(mat, gray, Imgproc.COLOR_BGR2GRAY); // imread returns BGR data, so convert from BGR rather than RGB
Imgcodecs.imwrite("fly_gray.bmp", gray);
}
Example 8: detectMany
import org.opencv.core.Mat; // import the package/class the method depends on
/**
*
* @Title: detectMany
* @Description: Detect faces in an image that may contain several people, and return the detected face regions
* @param mImgSRC
* @return
* MatOfRect
*/
public static MatOfRect detectMany(Mat mImgSRC) {
if(mImgSRC.empty()) {
LOG.info("檢測多人圖片檢測時沒有找到圖片");
return null;
}
// Path components for the directory containing the face-detector cascade file
String [] pathKey = {ServletContextHolder.getOpenCVHaarcascades(), "haarcascade_frontalface_alt.xml"};
CascadeClassifier cascade = new CascadeClassifier(FileUtils.buildFilePath(pathKey));
if(cascade.empty()) {
LOG.info("人臉檢測級聯加載器為null");
return null;
}
// Holds the detected face regions
MatOfRect mOfRect = new MatOfRect();
// Used to compute the scaling ratio
int scaledWidth = mImgSRC.width();
detectManyObject(mImgSRC, cascade, mOfRect, scaledWidth);
if(mOfRect.toArray().length <= 0) {
LOG.info("沒有檢測到人臉...");
return null;
}
return mOfRect;
}
Example 9: scaleTemplateMatch
import org.opencv.core.Mat; // import the package/class the method depends on
@SuppressWarnings("unused")
public MatchResult scaleTemplateMatch(Mat scene, Mat templ, Method method, double scaleFactor){
int tw = templ.width();
int th = templ.height();
double currScaleFactor = scaleFactor;
MatchResult bestScore = null;
for(Mat img = templ.clone(); img.width() > tw*0.25;
CvProcessing.resize(img, scaleFactor)){
MatchResult currResult = match(scene, img, method, img);
if(bestScore == null || bestScore.maxVal < currResult.maxVal){
bestScore = currResult;
bestScore.scaleFactor = currScaleFactor;
}
currScaleFactor *= scaleFactor;
}
return bestScore;
}
Example 10: atan2
import org.opencv.core.Mat; // import the package/class the method depends on
/**
* Calculate the element-wise atan2 (in degrees) of the two given single-channel images, which are assumed to have the same size.
*/
private void atan2(Mat src1, Mat src2, Mat dst) {
int height = src1.height();
int width = src2.width();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
dst.put(y, x, Core.fastAtan2((float) src1.get(y, x)[0], (float) src2.get(y, x)[0]));
}
}
}
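A hedged usage sketch (variable names are illustrative) showing how this element-wise atan2 could be used to build a gradient-orientation map from Sobel derivatives:

Mat gray = Imgcodecs.imread("input.png", Imgcodecs.IMREAD_GRAYSCALE); // hypothetical input
Mat gx = new Mat();
Mat gy = new Mat();
Imgproc.Sobel(gray, gx, CvType.CV_32F, 1, 0); // horizontal derivative
Imgproc.Sobel(gray, gy, CvType.CV_32F, 0, 1); // vertical derivative
Mat orientation = new Mat(gray.size(), CvType.CV_32F);
atan2(gy, gx, orientation); // per-pixel angle in degrees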
Example 11: onFinish
import org.opencv.core.Mat; // import the package/class the method depends on
/**
* Compute the screen-to-camera ratio from the first processed frame, then remove this listener as it is no longer needed.
* @param frame
*/
@Override
public void onFinish(Mat frame) {
if (frame.height() != 0)
{
screenCameraRatio.x = screenCoordinates.x / frame.width();
screenCameraRatio.y = screenCoordinates.y / frame.height();
laserDetector.removeOnFrameProcessedListeners(this);
}
}
Example 12: Picture
import org.opencv.core.Mat; // import the package/class the method depends on
public Picture(Mat matInput) {
this.bimg = Mat2BufferedImage.mat2BI(matInput);
this.data = GetPixelArray.pixelArray(bimg, false);
this.dataSingleChannel = GetPixelArray.pixelArray(bimg, true);
this.width = matInput.width();
this.height = matInput.height();
}
Example 13: testDetectManyObject
import org.opencv.core.Mat; // import the package/class the method depends on
@Test
public void testDetectManyObject() {
String opencvDLL = "G:/java/JavaProjectRelease/classchecks/src/main/webapp/WEB-INF/dll/x64/opencv_java320.dll";
System.load(opencvDLL);
String haarcascade = "haarcascade_frontalface_alt.xml";
CascadeClassifier cascade = new CascadeClassifier(XMLFilePath + haarcascade);
Mat src = Imgcodecs.imread(imageDir + "/split/14.jpg");
MatOfRect objects = new MatOfRect();
int scaledWidth = src.width();
DetectObject.detectManyObject(src, cascade, objects, scaledWidth);
Rect [] rects = objects.toArray();
int i = 0;
for(Rect r : rects) {
/*Imgproc.rectangle(src, new Point(r.x-100 , r.y-100 ),
new Point(r.x + r.width + 80,
r.y + r.height + 80), new Scalar(0, 0, 255), 3);*/
Imgproc.rectangle(src, r.tl(),
r.br(), new Scalar(0, 0, 255), 3);
/*r.width += 120;
r.height += 120;
r.x -= 100;
r.y -= 100;
System.out.println(r);
Mat roi = new Mat(src, r);
Imgcodecs.imwrite("e:/classchecks/2017417/split/"+i+".jpg", roi);
i ++;*/
}
Imgcodecs.imwrite("e:/classchecks/2017417/dectctManyObject.jpg", src);
//Imgcodecs.imwrite("e:/classchecks/dectctManyObject.jpg", src);
}
Example 14: matToBufferedImage
import org.opencv.core.Mat; // import the package/class the method depends on
public BufferedImage matToBufferedImage(Mat mat) {
if (mat.height() > 0 && mat.width() > 0) {
BufferedImage image = new BufferedImage(mat.width(), mat.height(), BufferedImage.TYPE_3BYTE_BGR); // assumes an 8-bit, 3-channel (CV_8UC3) BGR Mat
WritableRaster raster = image.getRaster();
DataBufferByte dataBuffer = (DataBufferByte) raster.getDataBuffer();
byte[] data = dataBuffer.getData();
mat.get(0, 0, data);
return image;
}
return null;
}
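A short usage sketch (file names are illustrative; ImageIO and File come from javax.imageio and java.io) that writes the converted image to disk:

Mat frame = Imgcodecs.imread("frame.png"); // hypothetical CV_8UC3 input
BufferedImage bi = matToBufferedImage(frame);
if (bi != null) {
    ImageIO.write(bi, "png", new File("frame_copy.png")); // may throw IOException
}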
Example 15: getInputDataResNet18
import org.opencv.core.Mat; // import the package/class the method depends on
private float[] getInputDataResNet18(Bitmap bitmap) {
final int INPUT_SIDE_LENGTH = 224;
Mat imageMat = new Mat();
Utils.bitmapToMat(bitmap, imageMat);
Imgproc.cvtColor(imageMat, imageMat, Imgproc.COLOR_RGBA2BGR);
imageMat = centerCropAndScale(imageMat, INPUT_SIDE_LENGTH);
Core.subtract(imageMat, new Scalar(104, 117, 123), imageMat);
imageMat.convertTo(imageMat, CvType.CV_32FC3);
float[] inputData = new float[imageMat.width() * imageMat.height() * imageMat.channels()];
imageMat.get(0, 0, inputData);
return inputData;
}
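As a side note, Mat.get(0, 0, float[]) fills the array in row-major, interleaved-channel (HWC) order. If the target network expects planar CHW input instead, a rearrangement like the following sketch (loop variables are illustrative) would be needed:

int h = imageMat.height();
int w = imageMat.width();
int c = imageMat.channels();
float[] chw = new float[inputData.length];
for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
        for (int k = 0; k < c; k++) {
            // move pixel (y, x), channel k from interleaved HWC to planar CHW layout
            chw[k * h * w + y * w + x] = inputData[(y * w + x) * c + k];
        }
    }
}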