This article collects typical usage examples of the Java method org.opencv.core.Mat.put. Wondering what Mat.put does, how to use it, or what calling it looks like in practice? The curated method examples below should help, and you can also explore further uses of its declaring class, org.opencv.core.Mat.
Below are 14 code examples of Mat.put, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
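Before the examples, here is a minimal sketch of the two Mat.put overloads most of them rely on: put(row, col, double...) writes one or more values starting at the given element and continues filling in row-major order, while put(row, col, byte[]) bulk-copies a primitive array into an 8-bit Mat. The class name and values are illustrative only.
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class MatPutBasics {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME); // load the native OpenCV library

        // put(row, col, double...) fills elements in row-major order and
        // returns the number of bytes written.
        Mat m = Mat.zeros(2, 2, CvType.CV_32F);
        m.put(0, 0, 1.0);       // single element at (0,0)
        m.put(1, 0, 2.0, 3.0);  // fills (1,0) and (1,1)

        // put(row, col, byte[]) bulk-copies a buffer into an 8-bit Mat.
        byte[] pixels = {0, 64, (byte) 128, (byte) 255};
        Mat img = new Mat(2, 2, CvType.CV_8U);
        img.put(0, 0, pixels);

        System.out.println(m.dump());
        System.out.println(img.dump());
    }
}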
Example 1: adaptativeProcess
import org.opencv.core.Mat; // import the package/class this method depends on
public static Mat adaptativeProcess(Mat img){
Mat im = new Mat();
Imgproc.threshold(img,im,120,255,Imgproc.THRESH_TRUNC);
im = Thresholding.adaptativeThresholding(im);
Imgproc.medianBlur(im,im,7);
Mat threshImg = Thresholding.InvertImageColor(im);
Thresholding.gridDetection(threshImg);
Mat mat = Mat.zeros(4,2,CvType.CV_32F);
mat.put(0,0,0); mat.put(0,1,512);
mat.put(1,0,0); mat.put(1,1,0);
mat.put(2,0,512); mat.put(2,1,0);
mat.put(3,0,512); mat.put(3,1,512);
mat = Imgproc.getPerspectiveTransform(Thresholding.grid,mat);
Mat M = new Mat();
Imgproc.warpPerspective(threshImg,M,mat, new Size(512,512));
Imgproc.medianBlur(M,M,3);
Imgproc.threshold(M,M,254,255,Imgproc.THRESH_BINARY);
return Thresholding.InvertImageColor(M);
}
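Because put(row, col, double...) keeps writing in row-major order past the end of a row, the four pairs of corner writes above (repeated in normalProcess below) could be collapsed into a single call; the final example (onActivityResult) uses the same trick for its 3×3 sharpen kernel. A minimal equivalent sketch:
// One row-major put call; each row of the 4x2 Mat is an (x, y) corner:
// (0,512), (0,0), (512,0), (512,512).
Mat corners = Mat.zeros(4, 2, CvType.CV_32F);
corners.put(0, 0,
        0, 512,
        0, 0,
        512, 0,
        512, 512);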
Example 2: normalProcess
import org.opencv.core.Mat; // import the package/class this method depends on
public static Mat normalProcess(Mat img){
Mat threshImg = Thresholding.InvertImageColor(img);
Thresholding.gridDetection(threshImg);
Mat mat = Mat.zeros(4,2,CvType.CV_32F);
mat.put(0,0,0); mat.put(0,1,512);
mat.put(1,0,0); mat.put(1,1,0);
mat.put(2,0,512); mat.put(2,1,0);
mat.put(3,0,512); mat.put(3,1,512);
mat = Imgproc.getPerspectiveTransform(Thresholding.grid,mat);
Mat M = new Mat();
Imgproc.warpPerspective(threshImg,M,mat, new Size(512,512));
return Thresholding.InvertImageColor(M);
}
Example 3: loadResource
import org.opencv.core.Mat; // import the package/class this method depends on
public static Mat loadResource(Context context, int resourceId, int flags) throws IOException
{
InputStream is = context.getResources().openRawResource(resourceId);
ByteArrayOutputStream os = new ByteArrayOutputStream(is.available());
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
os.write(buffer, 0, bytesRead);
}
is.close();
Mat encoded = new Mat(1, os.size(), CvType.CV_8U);
encoded.put(0, 0, os.toByteArray());
os.close();
Mat decoded = Imgcodecs.imdecode(encoded, flags);
encoded.release();
return decoded;
}
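This helper is essentially the stock OpenCV Android implementation of org.opencv.android.Utils.loadResource. A hypothetical call site from an Activity might look like the following; R.raw.sample is a placeholder resource id, not something defined in this article:
// Hypothetical usage; R.raw.sample is a placeholder raw resource.
Mat img = loadResource(this, R.raw.sample, Imgcodecs.IMREAD_COLOR);
Log.i("loadResource", "decoded " + img.cols() + "x" + img.rows());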
Example 4: conv_Mat
import org.opencv.core.Mat; // import the package/class this method depends on
private Mat conv_Mat(BufferedImage img) {
byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
Mat mat = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
mat.put(0, 0, data);
Mat mat1 = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
Imgproc.cvtColor(mat, mat1, Imgproc.COLOR_RGB2HSV);
return mat1;
}
Developer: javaspecial, Project: Face-detection-and-recognition-desktop-application, Lines: 10, Source: FaceRecognizeFrame.java
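One caveat: the raster bytes of the common BufferedImage.TYPE_3BYTE_BGR layout arrive in BGR order, not RGB, so if that is the input type here, COLOR_BGR2HSV would match the actual byte order better than COLOR_RGB2HSV. A sketch of that variant, assuming a TYPE_3BYTE_BGR source image:
// Variant assuming img is TYPE_3BYTE_BGR, whose raster bytes are B, G, R:
Mat bgr = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
bgr.put(0, 0, ((DataBufferByte) img.getRaster().getDataBuffer()).getData());
Mat hsv = new Mat();
Imgproc.cvtColor(bgr, hsv, Imgproc.COLOR_BGR2HSV);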
Example 5: LocalContrast
import org.opencv.core.Mat; // import the package/class this method depends on
public static Mat LocalContrast(Mat img) {
double[] h = { 1.0 / 16.0, 4.0 / 16.0, 6.0 / 16.0, 4.0 / 16.0, 1.0 / 16.0 };
Mat mask = new Mat(h.length, h.length, img.type());
for (int i = 0; i < h.length; i++) {
for (int j = 0; j < h.length; j++) {
mask.put(i, j, h[i] * h[j]);
}
}
Mat localContrast = new Mat();
Imgproc.filter2D(img, localContrast, img.depth(), mask);
for (int i = 0; i < localContrast.rows(); i++) {
for (int j = 0; j < localContrast.cols(); j++) {
if (localContrast.get(i, j)[0] > Math.PI / 2.75) localContrast.put(i, j, Math.PI / 2.75);
}
}
Core.subtract(img, localContrast, localContrast);
return localContrast.mul(localContrast);
}
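The 5×5 mask built above is the outer product of a 1-D binomial kernel with itself, so the filtering step can also be done separably, and the per-pixel clamp can be replaced with a truncating threshold. A sketch of that alternative, assuming img is single-channel floating point:
// Separable version of the blur (the 2-D mask equals h outer-product h):
Mat h = new Mat(1, 5, CvType.CV_32F);
h.put(0, 0, 1.0 / 16, 4.0 / 16, 6.0 / 16, 4.0 / 16, 1.0 / 16);
Mat localContrast = new Mat();
Imgproc.sepFilter2D(img, localContrast, img.depth(), h, h);
// THRESH_TRUNC caps values above PI/2.75 and leaves the rest untouched:
Imgproc.threshold(localContrast, localContrast, Math.PI / 2.75, 0, Imgproc.THRESH_TRUNC);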
Example 6: bufferedImage2Mat
import org.opencv.core.Mat; // import the package/class this method depends on
/**
* Converts a buffered image object in to an openCV mat.
* @param image buffered image
* @return mat
*/
public static Mat bufferedImage2Mat(BufferedImage image){
byte[] data = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
Mat mat = new Mat(image.getHeight(), image.getWidth(), CvType.CV_8UC3);
mat.put(0, 0, data);
return mat;
}
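The DataBufferByte cast only succeeds when the image is genuinely backed by three bytes per pixel (e.g. BufferedImage.TYPE_3BYTE_BGR); a TYPE_INT_RGB image would throw a ClassCastException. A defensive sketch that redraws other types first, assuming BGR channel order is acceptable downstream:
// Redraw into a known 3-byte layout before casting the data buffer.
public static Mat bufferedImage2MatSafe(BufferedImage image) {
    if (image.getType() != BufferedImage.TYPE_3BYTE_BGR) {
        BufferedImage converted = new BufferedImage(
                image.getWidth(), image.getHeight(), BufferedImage.TYPE_3BYTE_BGR);
        converted.getGraphics().drawImage(image, 0, 0, null);
        image = converted;
    }
    byte[] data = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    Mat mat = new Mat(image.getHeight(), image.getWidth(), CvType.CV_8UC3);
    mat.put(0, 0, data);
    return mat;
}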
Example 7: atan2
import org.opencv.core.Mat; // import the package/class this method depends on
/**
 * Computes the element-wise atan2 of the two given single-channel images,
 * writing the angle in degrees into dst. All three Mats are assumed to have
 * the same size.
 */
private void atan2(Mat src1, Mat src2, Mat dst) {
int height = src1.height();
int width = src2.width();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
dst.put(y, x, Core.fastAtan2((float) src1.get(y, x)[0], (float) src2.get(y, x)[0]));
}
}
}
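For floating-point inputs the same result is available in one vectorized call: Core.phase computes atan2 element-wise and returns degrees when its last flag is true, matching Core.fastAtan2. Note the argument order, since phase takes (x, y) while the loop above effectively uses src1 as y and src2 as x. A sketch assuming CV_32FC1 inputs:
// Vectorized equivalent for CV_32FC1 Mats; angles come back in degrees.
Mat angle = new Mat();
Core.phase(src2, src1, angle, true); // x = src2, y = src1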
Example 8: bufferedImageToMat
import org.opencv.core.Mat; // import the package/class this method depends on
public Mat bufferedImageToMat(BufferedImage bi) {
Mat mat = new Mat(bi.getHeight(), bi.getWidth(), CvType.CV_8UC3);
byte[] data = ((DataBufferByte) bi.getRaster().getDataBuffer()).getData();
mat.put(0, 0, data);
return mat;
}
Example 9: transEstimateEachChannel
import org.opencv.core.Mat; // import the package/class this method depends on
public static Mat transEstimateEachChannel(Mat img, int patchSz, double airlight, double lambda, double fTrans) {
int rows = img.rows();
int cols = img.cols();
Mat T = new Mat(rows, cols, img.type());
for (int i = 0; i < rows; i += patchSz) {
for (int j = 0; j < cols; j += patchSz) {
int endRow = i + patchSz > rows ? rows : i + patchSz;
int endCol = j + patchSz > cols ? cols : j + patchSz;
Mat blkIm = img.submat(i, endRow, j, endCol);
double Trans = BlkTransEstimate.blkEstimateEachChannel(blkIm, airlight, lambda, fTrans);
for (int m = i; m < endRow; m++) for (int n = j; n < endCol; n++) T.put(m, n, Trans);
}
}
return T;
}
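Filling a whole block with one scalar through nested T.put calls can be replaced by a single submat/setTo pair, which keeps the copy in native code. A sketch of the inner-loop replacement:
// In place of the two inner m/n loops:
T.submat(i, endRow, j, endCol).setTo(new Scalar(Trans));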
Example 10: ExtractBoxes
import org.opencv.core.Mat; // import the package/class this method depends on
public static Mat ExtractBoxes(Mat im, int connectivity){
try{
Mat stats = new Mat();
Mat imB = Processing.binarize(im,Imgproc.THRESH_OTSU);
Imgproc.connectedComponentsWithStats(imB,new Mat(),stats,new Mat(),connectivity,CvType.CV_32S);
Mat stat = Mat.zeros(81,5,CvType.CV_32F);
double mcw = stats.get(0,2)[0]; //max contour width
double mch = stats.get(0,3)[0]; // max contour height
int j=0;
for(int i = 1; i < stats.height(); i++){
double area = stats.get(i,2)[0] * stats.get(i,3)[0];
if(area < (mcw/8)*(mch/8) && area > (mcw/25)*(mch/25)){
stat.put(j,0,stats.get(i,0)[0]);
stat.put(j,1,stats.get(i,1)[0]);
stat.put(j,2,stats.get(i,2)[0]);
stat.put(j,3,stats.get(i,3)[0]);
stat.put(j,4,stats.get(i,4)[0]);
j +=1;
}
}
stat = Processing.statSorted(stat,0);
/*if(stat.get(0,0)[0] == 0 && stat.get(0,1)[0] ==0 && (stat.get(0,2)[0] ==0 || stat.get(0,3)[0] ==0))
return null;*/
return stat;
}
catch(Exception e){
return null;
}
}
Example 11: Exposedness
import org.opencv.core.Mat; // import the package/class this method depends on
public static Mat Exposedness(Mat img) {
double sigma = 0.25;
double average = 0.5;
int rows = img.rows();
int cols = img.cols();
Mat exposedness = Mat.zeros(rows, cols, img.type());
// W = exp(-(img - aver).^2 / (2*sigma^2));
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
double value = Math.exp(-1.0 * Math.pow(img.get(i, j)[0] - average, 2.0) / (2 * Math.pow(sigma, 2.0)));
exposedness.put(i, j, value);
}
}
return exposedness;
}
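The same Gaussian weighting can be computed without the explicit loops using whole-matrix arithmetic. A sketch, assuming img is single-channel floating point (Core.exp requires CV_32F or CV_64F) with values in [0, 1]:
// W = exp(-(img - average)^2 / (2 * sigma^2)) without per-pixel puts:
Mat diff = new Mat();
Core.subtract(img, new Scalar(average), diff);                       // img - average
Mat exposedness = new Mat();
Core.multiply(diff, diff, exposedness, -1.0 / (2 * sigma * sigma));  // scaled square
Core.exp(exposedness, exposedness);                                  // element-wise exp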
Example 12: executar
import org.opencv.core.Mat; // import the package/class this method depends on
public void executar(String output1, String output2) {
int height1 = original1.height();
int width1 = original1.width();
int height2 = original2.height();
int width2 = original2.width();
Mat imgCinza = new Mat(height1, width1, original1.type());
Mat imgColorida = new Mat(height2, width2, original2.type());
for (int h = 1; h < height1 - 1; h++) {
for (int w = 1; w < width1 - 1; w++) {
int cor1 = getMediaCinza(h, w);
double[] tom1 = {cor1, cor1, cor1};
imgCinza.put(h, w, tom1);
}
}
for (int h = 1; h < height2 - 1; h++) {
for (int w = 1; w < width2 - 1; w++) {
imgColorida.put(h, w, getMediaColorida(h, w));
}
}
Manipulacao m = new Manipulacao();
m.salvarImagem(output1, imgCinza);
m.salvarImagem(output2, imgColorida);
}
Example 13: detectMinutiae
import org.opencv.core.Mat; // import the package/class this method depends on
private Mat detectMinutiae(Mat skeleton, int border) {
HashSet<Minutiae> minutiaeSet = new HashSet<>();
System.out.println("Detecting minutiae");
for(int c = border; c<skeleton.cols()-border; c++){
for(int r = border; r<skeleton.rows()-border; r++) {
double point = skeleton.get(r, c)[0];
if (point != 0) { // Not black
int cn = neighbourCount(skeleton, r, c);
if(cn == 1)
minutiaeSet.add(new Minutiae(c, r, Minutiae.Type.RIDGEENDING));
else if(cn == 3)
minutiaeSet.add(new Minutiae(c, r, Minutiae.Type.BIFURCATION));
}
}
}
System.out.println("filtering minutiae");
HashSet<Minutiae> filteredMinutiae = filterMinutiae(minutiaeSet, skeleton);
System.out.println("number of minutiae: " + filteredMinutiae.size());
Mat result = new Mat();
System.out.println("Drawing minutiae");
Imgproc.cvtColor(skeleton, result, Imgproc.COLOR_GRAY2RGB);
double[] red = {255, 0, 0};
double[] green = {0, 255, 0};
for (Minutiae m : filteredMinutiae) {
double [] color;
if (m.type == Minutiae.Type.BIFURCATION) color = green;
else color = red;
result.put(m.y, m.x , color);
result.put(m.y, m.x-1, color);
result.put(m.y, m.x+1, color);
result.put(m.y-1, m.x , color);
result.put(m.y+1, m.x , color);
}
MatOfKeyPoint keypoints = new MatOfKeyPoint();
keypoints.fromArray(minutiaeToKeyPoints(skeleton, filteredMinutiae));
keypointsField = keypoints;
DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
Mat descriptors = new Mat();
extractor.compute(skeleton, keypoints, descriptors);
descriptorsField = descriptors;
return result;
}
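neighbourCount is not shown in this example; given that cn == 1 marks a ridge ending and cn == 3 a bifurcation, it presumably counts the non-zero 8-neighbours of a pixel on the one-pixel-wide skeleton. A hypothetical sketch consistent with that reading (the real project's helper may differ):
// Hypothetical helper: counts non-zero 8-neighbours of (r, c) on the skeleton.
private static int neighbourCount(Mat skeleton, int r, int c) {
    int count = 0;
    for (int dr = -1; dr <= 1; dr++) {
        for (int dc = -1; dc <= 1; dc++) {
            if (dr == 0 && dc == 0) continue; // skip the centre pixel
            if (skeleton.get(r + dr, c + dc)[0] != 0) count++;
        }
    }
    return count;
}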
Example 14: onActivityResult
import org.opencv.core.Mat; // import the package/class this method depends on
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent imageReturnedIntent) {
//Put it there, just in case:)
super.onActivityResult(requestCode, resultCode, imageReturnedIntent);
switch(requestCode) {
case SELECT_PHOTO:
if(resultCode == RESULT_OK && read_external_storage_granted){
try {
final Uri imageUri = imageReturnedIntent.getData();
final InputStream imageStream = getContentResolver().openInputStream(imageUri);
final Bitmap selectedImage = BitmapFactory.decodeStream(imageStream);
src = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC4);
Utils.bitmapToMat(selectedImage, src);
src_gray = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC1);
switch (ACTION_MODE) {
case HomeActivity.GAUSSIAN_BLUR:
Imgproc.GaussianBlur(src, src, new Size(9, 9), 0);
break;
case HomeActivity.MEAN_BLUR:
Imgproc.blur(src, src, new Size(9, 9));
break;
case HomeActivity.MEDIAN_BLUR:
Imgproc.medianBlur(src, src, 9);
break;
case HomeActivity.SHARPEN:
Mat kernel = new Mat(3, 3, CvType.CV_16SC1);
//int[] values = {0, -1, 0, -1, 5, -1, 0, -1, 0};
Log.d("imageType", CvType.typeToString(src.type()) + "");
kernel.put(0, 0, 0, -1, 0, -1, 5, -1, 0, -1, 0);
Imgproc.filter2D(src, src, src_gray.depth(), kernel);
break;
case HomeActivity.DILATE:
Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
Mat kernelDilate = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
Imgproc.dilate(src_gray, src_gray, kernelDilate);
Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
break;
case HomeActivity.ERODE:
Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
Mat kernelErode = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
Imgproc.erode(src_gray, src_gray, kernelErode);
Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
break;
case HomeActivity.THRESHOLD:
Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
break;
case HomeActivity.ADAPTIVE_THRESHOLD:
Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
Imgproc.adaptiveThreshold(src_gray, src_gray, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 3, 0);
Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
break;
}
Bitmap processedImage = Bitmap.createBitmap(src.cols(), src.rows(), Bitmap.Config.ARGB_8888);
Log.i("imageType", CvType.typeToString(src.type()) + "");
Utils.matToBitmap(src, processedImage);
ivImage.setImageBitmap(selectedImage);
ivImageProcessed.setImageBitmap(processedImage);
Log.i("process", "process done");
} catch (FileNotFoundException e) {
e.printStackTrace();
}
}
break;
}
}