This article collects typical usage examples of the Mat.ConvertTo method in C#. If you are wondering how exactly to use C# Mat.ConvertTo, or what it is good for, the curated examples below may help. You can also explore further usage examples of the containing class, Mat.
Below, 12 code examples of Mat.ConvertTo are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
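Before diving into the examples, here is a minimal sketch of the call itself. Mat.ConvertTo(dst, depth, alpha, beta) computes dst = src * alpha + beta, rounds, and saturate-casts to the target depth; with the default alpha = 1, beta = 0 it is a pure depth conversion. The snippet uses Emgu CV (the first example's library) and a placeholder file name; the OpenCvSharp overload used in later examples behaves the same way.
static void ConvertToSketch()
{
    // Load an 8-bit grayscale image (placeholder path).
    Mat src = CvInvoke.Imread("input.png", LoadImageType.Grayscale);
    // Widen to 32-bit float and rescale 0..255 into 0..1 in one pass.
    Mat asFloat = new Mat();
    src.ConvertTo(asFloat, DepthType.Cv32F, 1.0 / 255.0, 0);
    // Convert back: values are multiplied by 255, rounded, and saturated to [0, 255].
    Mat back = new Mat();
    asFloat.ConvertTo(back, DepthType.Cv8U, 255.0, 0);
}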
Example 1: FilterTiles
public void FilterTiles(Mat image, Mat modifiedMat)
{
    CvInvoke.Imshow("0", image);
    Stopwatch sw1 = new Stopwatch();
    sw1.Start();
    Mat laplaced = new Mat();
    CvInvoke.CvtColor(image, laplaced, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
    Mat greyResult = laplaced.Clone();
    Mat greySource = laplaced.Clone();
    Mat cannySrc = new Mat();
    //if not half inch, do canny and subtract to separate tiles better. Basically "sharpens" the edge
    if (scan.TileSettings.CannyEdges)
    {
        //create canny image, these parameters could be adjusted probably?
        CvInvoke.Canny(greySource, greyResult, 50, 150);
        //dilate canny
        CvInvoke.Dilate(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
        CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
        CvInvoke.Imshow("1a", greyResult);
        //subtract dilated canny from source to get separation
        CvInvoke.Subtract(greySource, greyResult, greyResult);
        greySource = greyResult.Clone();
        CvInvoke.Imshow("1b", greyResult);
    }
    if (scan.TileSettings.ThresholdEdges)
    {
        Mat edges = new Mat();
        CvInvoke.Threshold(greyResult, edges, (float)thresholdTrackbar.Value, 0, ThresholdType.ToZero);
        CvInvoke.Subtract(greySource, edges, greyResult);
        CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), 2, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
        CvInvoke.Imshow("pres-1c", greyResult);
    }
    //perform distance transform
    CvInvoke.DistanceTransform(greyResult, greyResult, null, DistType.L2, 5);
    //normalize the image to bring out the peaks
    CvInvoke.Normalize(greyResult, greyResult, 0, 1, NormType.MinMax);
    CvInvoke.Imshow("2", greyResult);
    //threshold the image, different thresholds for different tiles
    CvInvoke.Threshold(greyResult, greyResult, scan.TileSettings.ThresholdVal, 1, ThresholdType.Binary);
    CvInvoke.Imshow("3", greyResult);
    //erode to split the blobs
    CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(-1, -1), scan.TileSettings.ThresholdErode, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
    //convert to 8 bit unsigned, needed for FindContours
    greyResult.ConvertTo(greyResult, DepthType.Cv8U);
    VectorOfVectorOfPoint markers = new VectorOfVectorOfPoint();
    //create 32bit, single channel image for result of markers
    Mat markerImage = new Mat(greyResult.Size, DepthType.Cv32S, 1);
    //set image to 0
    markerImage.SetTo(new MCvScalar(0, 0, 0));
    //find the contours
    CvInvoke.FindContours(greyResult, markers, null, RetrType.External, ChainApproxMethod.LinkRuns);
    //label the markers from 1 -> n, the rest of the image should remain 0
    for (int i = 0; i < markers.Size; i++)
        CvInvoke.DrawContours(markerImage, markers, i, new MCvScalar(i + 1, i + 1, i + 1), -1);
    ScalarArray mult = new ScalarArray(5000);
    Mat markerVisual = new Mat();
    CvInvoke.Multiply(markerImage, mult, markerVisual);
    CvInvoke.Imshow("4", markerVisual);
    //draw the background marker
    CvInvoke.Circle(markerImage,
        new System.Drawing.Point(5, 5),
        3,
        new MCvScalar(255, 255, 255),
        -1);
    //convert to 3 channel
    Mat convertedOriginal = new Mat();
    //use canny modified if 3/4", or use the gray image for others
    CvInvoke.CvtColor(greySource, convertedOriginal, ColorConversion.Gray2Bgr);
    //watershed!!
    CvInvoke.Watershed(convertedOriginal, markerImage);
    //visualize
    CvInvoke.Multiply(markerImage, mult, markerVisual);
    CvInvoke.Imshow("5", markerVisual);
    //get contours to get the actual tiles now that they are separate...
//......... part of the code omitted here .........
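The example multiplies the CV_32S marker image by a large scalar just to make the labels visible. ConvertTo can achieve a similar visualization in one call, because its alpha/beta parameters apply a per-pixel linear map during the depth change. A small sketch reusing markerImage and markers from above (after the watershed, boundary pixels hold -1 and saturate to 0):
    // Spread labels 1..n across the 8-bit range while narrowing CV_32S -> CV_8U.
    Mat markerVisual8U = new Mat();
    markerImage.ConvertTo(markerVisual8U, DepthType.Cv8U, 255.0 / markers.Size, 0);
    CvInvoke.Imshow("markers (8-bit)", markerVisual8U);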
Example 2: StereoCorrespondence
public StereoCorrespondence()
{
    // cvFindStereoCorrespondenceBM + cvFindStereoCorrespondenceGC
    // Stereo matching with both the block-matching and graph-cut algorithms
    // Load the input images
    using (IplImage imgLeft = new IplImage(Const.ImageTsukubaLeft, LoadMode.GrayScale))
    using (IplImage imgRight = new IplImage(Const.ImageTsukubaRight, LoadMode.GrayScale))
    {
        // Allocate the disparity and output image buffers
        using (IplImage dispBM = new IplImage(imgLeft.Size, BitDepth.S16, 1))
        using (IplImage dispLeft = new IplImage(imgLeft.Size, BitDepth.S16, 1))
        using (IplImage dispRight = new IplImage(imgLeft.Size, BitDepth.S16, 1))
        using (IplImage dstBM = new IplImage(imgLeft.Size, BitDepth.U8, 1))
        using (IplImage dstGC = new IplImage(imgLeft.Size, BitDepth.U8, 1))
        using (IplImage dstAux = new IplImage(imgLeft.Size, BitDepth.U8, 1))
        using (Mat dstSGBM = new Mat())
        {
            // Distance measurement and scaling
            int sad = 3;
            using (CvStereoBMState stateBM = new CvStereoBMState(StereoBMPreset.Basic, 16))
            using (CvStereoGCState stateGC = new CvStereoGCState(16, 2))
            using (StereoSGBM sgbm = new StereoSGBM()
            {
                MinDisparity = 0,
                NumberOfDisparities = 32,
                PreFilterCap = 63,
                SADWindowSize = sad,
                P1 = 8 * imgLeft.NChannels * sad * sad,
                P2 = 32 * imgLeft.NChannels * sad * sad,
                UniquenessRatio = 10,
                SpeckleWindowSize = 100,
                SpeckleRange = 32,
                Disp12MaxDiff = 1,
                FullDP = false,
            })
            {
                Cv.FindStereoCorrespondenceBM(imgLeft, imgRight, dispBM, stateBM); // stateBM.FindStereoCorrespondence(imgLeft, imgRight, dispBM);
                Cv.FindStereoCorrespondenceGC(imgLeft, imgRight, dispLeft, dispRight, stateGC, false); // stateGC.FindStereoCorrespondence(imgLeft, imgRight, dispLeft, dispRight, false);
                Cv.FindStereoCorrespondence(imgLeft, imgRight, DisparityMode.Birchfield, dstAux, 50, 25, 5, 12, 15, 25);
                sgbm.FindCorrespondence(new Mat(imgLeft), new Mat(imgRight), dstSGBM);
                Cv.ConvertScale(dispBM, dstBM, 1);
                Cv.ConvertScale(dispLeft, dstGC, -16);
                Cv.ConvertScale(dstAux, dstAux, 16);
                dstSGBM.ConvertTo(dstSGBM, dstSGBM.Type(), 32, 0);
                using (new CvWindow("Stereo Correspondence (BM)", dstBM))
                using (new CvWindow("Stereo Correspondence (GC)", dstGC))
                using (new CvWindow("Stereo Correspondence (cvaux)", dstAux))
                using (new CvWindow("Stereo Correspondence (SGBM)", dstSGBM.ToIplImage()))
                {
                    Cv.WaitKey();
                }
            }
        }
    }
}
Example 3: Run
public void Run()
{
    // Load left&right images
    using (var imgLeft = new IplImage(FilePath.Image.TsukubaLeft, LoadMode.GrayScale))
    using (var imgRight = new IplImage(FilePath.Image.TsukubaRight, LoadMode.GrayScale))
    {
        // output image buffers
        using (var dispBM = new IplImage(imgLeft.Size, BitDepth.S16, 1))
        using (var dispLeft = new IplImage(imgLeft.Size, BitDepth.S16, 1))
        using (var dispRight = new IplImage(imgLeft.Size, BitDepth.S16, 1))
        using (var dstBM = new IplImage(imgLeft.Size, BitDepth.U8, 1))
        using (var dstGC = new IplImage(imgLeft.Size, BitDepth.U8, 1))
        using (var dstAux = new IplImage(imgLeft.Size, BitDepth.U8, 1))
        using (var dstSGBM = new Mat())
        {
            // measures distance and scales
            const int sad = 3;
            using (var stateBM = new CvStereoBMState(StereoBMPreset.Basic, 16))
            using (var stateGC = new CvStereoGCState(16, 2))
            using (var sgbm = new StereoSGBM() // C++
            {
                MinDisparity = 0,
                NumberOfDisparities = 32,
                PreFilterCap = 63,
                SADWindowSize = sad,
                P1 = 8 * imgLeft.NChannels * sad * sad,
                P2 = 32 * imgLeft.NChannels * sad * sad,
                UniquenessRatio = 10,
                SpeckleWindowSize = 100,
                SpeckleRange = 32,
                Disp12MaxDiff = 1,
                FullDP = false,
            })
            {
                Cv.FindStereoCorrespondenceBM(imgLeft, imgRight, dispBM, stateBM);
                Cv.FindStereoCorrespondenceGC(imgLeft, imgRight, dispLeft, dispRight, stateGC, false);
                Cv.FindStereoCorrespondence(imgLeft, imgRight, DisparityMode.Birchfield, dstAux, 50, 25, 5, 12, 15, 25); // cvaux
                sgbm.Compute(new Mat(imgLeft), new Mat(imgRight), dstSGBM);
                Cv.ConvertScale(dispBM, dstBM, 1);
                Cv.ConvertScale(dispLeft, dstGC, -16);
                Cv.ConvertScale(dstAux, dstAux, 16);
                dstSGBM.ConvertTo(dstSGBM, dstSGBM.Type(), 32, 0);
                using (new CvWindow("Stereo Correspondence (BM)", dstBM))
                using (new CvWindow("Stereo Correspondence (GC)", dstGC))
                using (new CvWindow("Stereo Correspondence (cvaux)", dstAux))
                using (new CvWindow("Stereo Correspondence (SGBM)", dstSGBM.ToIplImage()))
                {
                    Cv.WaitKey();
                }
            }
        }
    }
}
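In both stereo examples the SGBM output stays in OpenCV's fixed-point format, where each value is the true disparity multiplied by 16, and ConvertTo(..., type, 32, 0) merely brightens it for display. A common alternative, sketched here against the same OpenCvSharp API, maps the full disparity range onto 0..255 instead (NumberOfDisparities = 32 is taken from the samples):
    // dstSGBM is CV_16S with values equal to disparity * 16.
    const int numberOfDisparities = 32;
    var disp8 = new Mat();
    dstSGBM.ConvertTo(disp8, MatType.CV_8U, 255.0 / (numberOfDisparities * 16.0), 0);
    // disp8 now shows near objects bright and far objects dark.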
Example 4: DoTracking
// FaceTracking
void DoTracking()
{
    //while (running)
    //{
    try
    {
        if (kinect.GetDepthRaw())
        {
            //lock (this)
            //{
            src = DoDepthBuffer(kinect.usersDepthMap, KinectWrapper.GetDepthWidth(), KinectWrapper.GetDepthHeight());
            roi = src.Clone(new OpenCvSharp.CPlusPlus.Rect(roiX, roiY, roiW, roiH));
            roi.ConvertTo(roi, OpenCvSharp.CPlusPlus.MatType.CV_8U, 255.0 / 32000.0);
            Cv2.Subtract(new Mat(roiH, roiW, MatType.CV_8UC1, new Scalar(255)), roi, roi);
            double threshMax = 255.0 - ((255.0 / 32000.0) * ((ushort)srcThreshMax << 3));
            double threshMin = 255.0 - ((255.0 / 32000.0) * ((ushort)srcThreshMin << 3));
            roi = roi.Threshold(threshMin, 255.0, ThresholdType.ToZeroInv);
            roi = roi.Threshold(threshMax, 255.0, ThresholdType.ToZero);
            // Flip up/down dimension and right/left dimension
            if (!FlipUpDownAxis && FlipLeftRightAxis)
                roi.Flip(FlipMode.XY);
            else if (!FlipUpDownAxis)
                roi.Flip(FlipMode.X);
            else if (FlipLeftRightAxis)
                roi.Flip(FlipMode.Y);
            //Apply elliptical mask
            Mat ellipseMask = new Mat(roi.Rows, roi.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8U, new Scalar(0.0));
            Cv2.Ellipse(ellipseMask, new Point(ellipseMaskCenterX, ellipseMaskCenterY), new Size(axisMaskX, axisMaskY), maskAngle, maskStartAngle, maskEndAngle, new Scalar(255.0), -1);
            Cv2.BitwiseAnd(roi, ellipseMask, roi);
            //Remove noise by morphologyEx
            Mat kernel = Cv2.GetStructuringElement(StructuringElementShape.Ellipse, new Size(3, 3));
            Cv2.MorphologyEx(roi, roi, MorphologyOperation.Open, kernel);
            Cv2.MorphologyEx(roi, roi, MorphologyOperation.Close, kernel);
            //Subtract background
            if (first)
            {
                bg = roi.Clone();
                //bg = bg.Blur(new Size(smoothBlur, smoothBlur));
                first = false;
            }
            fg = bg.Clone();
            //roi = roi.Blur(new Size(smoothBlur, smoothBlur));
            Mat subMask = roi.Clone();
            subMask = subMask.Threshold(smThresh, 255.0, ThresholdType.ToZero);
            //Cv2.ImShow("sm",subMask);
            bg.CopyTo(roi, subMask);
            OpenCvSharp.Cv.AbsDiff(roi.ToCvMat(), bg.ToCvMat(), fg.ToCvMat());
            //Threshold foreground image
            fgthresh = fg.Threshold(threshold, 255.0, ThresholdType.Binary);
            fgthresh = fgthresh.Blur(new Size(smoothBlur, smoothBlur));
            //Detect Blobs
            Mat roiToImg = new Mat(roi.Cols, roi.Rows, MatType.CV_8UC3);
            Mat threshToImg = fgthresh.Clone();
            Cv2.Merge(new Mat[] { roi, roi, roi }, roiToImg);
            IplImage showImg = roiToImg.ToIplImage();
            IplImage fgthreshImg = threshToImg.ToIplImage();
            OpenCvSharp.Blob.CvBlobLib.Label(fgthreshImg, blobs);
            OpenCvSharp.Blob.CvBlobLib.FilterByArea(blobs, blobMinArea, blobMaxArea);
            OpenCvSharp.Blob.CvBlobLib.RenderBlobs(blobs, fgthreshImg, showImg, RenderBlobsMode.Color | RenderBlobsMode.Centroid);
            UpdateTracks(blobs, tracks, blobMinDistance, blobMaxLife);
            //OpenCvSharp.Blob.CvBlobLib.RenderTracks(tracks, fgthreshImg, showImg, RenderTracksMode.BoundingBox | RenderTracksMode.Id);
            RenderTracks(showImg);
            //Cv.ShowImage("thres", fgthreshImg);
            Cv.ShowImage("showBlob", showImg);
            //Check Blob Actions
            //Debug.Log(tracks.Count);
            //}
        }
    }
    catch (System.Exception e)
    {
        //throw e;
        Debug.Log(e.Message + " " + e.StackTrace);
    }
    //}
}
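The key ConvertTo call in this example is the depth rescale near the top: the Kinect buffer is 16-bit, and the code treats roughly 32000 as its maximum after the 3-bit shift, so alpha = 255.0 / 32000.0 compresses it into the 8-bit range. Isolated as a sketch with a hypothetical depth16 input:
    // depth16: CV_16U raw depth, assumed to span [0, 32000] as in the sample.
    Mat depth8 = new Mat();
    depth16.ConvertTo(depth8, MatType.CV_8U, 255.0 / 32000.0);
    // Invert so near objects are bright, as the sample does with Cv2.Subtract:
    Cv2.Subtract(new Mat(depth8.Rows, depth8.Cols, MatType.CV_8UC1, new Scalar(255)), depth8, depth8);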
Example 5: DoOCR
public void DoOCR(CvKNearest kNearest, string path)
{
    var src = Cv2.ImRead(path);
    Cv2.ImShow("Source", src);
    var gray = new Mat();
    Cv2.CvtColor(src, gray, ColorConversion.BgrToGray);
    var threshImage = new Mat();
    Cv2.Threshold(gray, threshImage, Thresh, ThresholdMaxVal, ThresholdType.BinaryInv); // Threshold to find contour
    Point[][] contours;
    HiearchyIndex[] hierarchyIndexes;
    Cv2.FindContours(
        threshImage,
        out contours,
        out hierarchyIndexes,
        mode: ContourRetrieval.CComp,
        method: ContourChain.ApproxSimple);
    if (contours.Length == 0)
    {
        throw new NotSupportedException("Couldn't find any object in the image.");
    }
    //Create input sample by contour finding and cropping
    var dst = new Mat(src.Rows, src.Cols, MatType.CV_8UC3, Scalar.All(0));
    var contourIndex = 0;
    while ((contourIndex >= 0))
    {
        var contour = contours[contourIndex];
        var boundingRect = Cv2.BoundingRect(contour); //Find bounding rect for each contour
        Cv2.Rectangle(src,
            new Point(boundingRect.X, boundingRect.Y),
            new Point(boundingRect.X + boundingRect.Width, boundingRect.Y + boundingRect.Height),
            new Scalar(0, 0, 255),
            2);
        var roi = new Mat(threshImage, boundingRect); //Crop the image
        var resizedImage = new Mat();
        var resizedImageFloat = new Mat();
        Cv2.Resize(roi, resizedImage, new Size(10, 10)); //resize to 10X10
        resizedImage.ConvertTo(resizedImageFloat, MatType.CV_32FC1); //convert to float
        var result = resizedImageFloat.Reshape(1, 1);
        var results = new Mat();
        var neighborResponses = new Mat();
        var dists = new Mat();
        var detectedClass = (int)kNearest.FindNearest(result, 1, results, neighborResponses, dists);
        //Console.WriteLine("DetectedClass: {0}", detectedClass);
        //Cv2.ImShow("roi", roi);
        //Cv.WaitKey(0);
        //Cv2.ImWrite(string.Format("det_{0}_{1}.png",detectedClass, contourIndex), roi);
        Cv2.PutText(
            dst,
            detectedClass.ToString(CultureInfo.InvariantCulture),
            new Point(boundingRect.X, boundingRect.Y + boundingRect.Height),
            0,
            1,
            new Scalar(0, 255, 0),
            2);
        contourIndex = hierarchyIndexes[contourIndex].Next;
    }
    Cv2.ImShow("Segmented Source", src);
    Cv2.ImShow("Detected", dst);
    Cv2.ImWrite("dest.jpg", dst);
    Cv2.WaitKey();
}
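The ConvertTo call in the loop is not cosmetic: CvKNearest.FindNearest only accepts CV_32F sample rows, so each 10×10 crop must be converted to float and flattened to 1×100 before classification. That preparation step in isolation (roi is an 8-bit crop as above):
    // One KNN sample = one CV_32FC1 row.
    var resized = new Mat();
    Cv2.Resize(roi, resized, new Size(10, 10));  // normalize the glyph size
    var sample = new Mat();
    resized.ConvertTo(sample, MatType.CV_32FC1); // CV_8U -> CV_32F, values unchanged
    sample = sample.Reshape(1, 1);               // 10x10 -> 1x100 row vector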
Example 6: processTrainingImage
private static Mat processTrainingImage(Mat gray)
{
    var threshImage = new Mat();
    Cv2.Threshold(gray, threshImage, Thresh, ThresholdMaxVal, ThresholdType.BinaryInv); // Threshold to find contour
    Point[][] contours;
    HiearchyIndex[] hierarchyIndexes;
    Cv2.FindContours(
        threshImage,
        out contours,
        out hierarchyIndexes,
        mode: ContourRetrieval.CComp,
        method: ContourChain.ApproxSimple);
    if (contours.Length == 0)
    {
        return null;
    }
    Mat result = null;
    var contourIndex = 0;
    while ((contourIndex >= 0))
    {
        var contour = contours[contourIndex];
        var boundingRect = Cv2.BoundingRect(contour); //Find bounding rect for each contour
        var roi = new Mat(threshImage, boundingRect); //Crop the image
        //Cv2.ImShow("src", gray);
        //Cv2.ImShow("roi", roi);
        //Cv.WaitKey(0);
        var resizedImage = new Mat();
        var resizedImageFloat = new Mat();
        Cv2.Resize(roi, resizedImage, new Size(10, 10)); //resize to 10X10
        resizedImage.ConvertTo(resizedImageFloat, MatType.CV_32FC1); //convert to float
        result = resizedImageFloat.Reshape(1, 1);
        contourIndex = hierarchyIndexes[contourIndex].Next;
    }
    return result;
}
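For context, rows returned by processTrainingImage are typically stacked into one training matrix. A hedged sketch of such a loop follows; the folder layout, the label-from-filename convention, and the availability of Cv2.VConcat and CvKNearest.Train(Mat, Mat) in this OpenCvSharp version are all assumptions:
    var rows = new List<Mat>();
    var labels = new List<float>();
    foreach (var file in Directory.GetFiles("train", "*.png")) // hypothetical: one digit per file, label is the first char
    {
        var gray = Cv2.ImRead(file, LoadMode.GrayScale);
        var row = processTrainingImage(gray); // 1x100 CV_32FC1 row, or null
        if (row == null) continue;
        rows.Add(row);
        labels.Add(float.Parse(Path.GetFileName(file).Substring(0, 1)));
    }
    var samples = new Mat();
    Cv2.VConcat(rows.ToArray(), samples); // Nx100 CV_32FC1 (assumed API)
    var responses = new Mat(labels.Count, 1, MatType.CV_32FC1, labels.ToArray());
    var kNearest = new CvKNearest();
    kNearest.Train(samples, responses);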
Example 7: Dft
public static void Dft(string path)
{
    Mat img = Cv2.ImRead(path, LoadMode.GrayScale);
    // expand input image to optimal size
    Mat padded = new Mat();
    int m = Cv2.GetOptimalDFTSize(img.Rows);
    int n = Cv2.GetOptimalDFTSize(img.Cols); // on the border add zero values
    Cv2.CopyMakeBorder(img, padded, 0, m - img.Rows, 0, n - img.Cols, BorderType.Constant, Scalar.All(0));
    // Add to the expanded another plane with zeros
    Mat paddedF32 = new Mat();
    padded.ConvertTo(paddedF32, MatType.CV_32F);
    Mat[] planes = { paddedF32, Mat.Zeros(padded.Size(), MatType.CV_32F) };
    Mat complex = new Mat();
    Cv2.Merge(planes, complex);
    // this way the result may fit in the source matrix
    Mat dft = new Mat();
    Cv2.Dft(complex, dft);
    // compute the magnitude and switch to logarithmic scale
    // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
    Mat[] dftPlanes;
    Cv2.Split(dft, out dftPlanes); // dftPlanes[0] = Re(DFT(I)), dftPlanes[1] = Im(DFT(I))
    // magnitude = sqrt(Re^2 + Im^2)
    Mat magnitude = new Mat();
    Cv2.Magnitude(dftPlanes[0], dftPlanes[1], magnitude);
    magnitude += Scalar.All(1); // switch to logarithmic scale
    Cv2.Log(magnitude, magnitude);
    // crop the spectrum, if it has an odd number of rows or columns
    Mat spectrum = magnitude[
        new Rect(0, 0, magnitude.Cols & -2, magnitude.Rows & -2)];
    // rearrange the quadrants of Fourier image so that the origin is at the image center
    int cx = spectrum.Cols / 2;
    int cy = spectrum.Rows / 2;
    Mat q0 = new Mat(spectrum, new Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant
    Mat q1 = new Mat(spectrum, new Rect(cx, 0, cx, cy)); // Top-Right
    Mat q2 = new Mat(spectrum, new Rect(0, cy, cx, cy)); // Bottom-Left
    Mat q3 = new Mat(spectrum, new Rect(cx, cy, cx, cy)); // Bottom-Right
    // swap quadrants (Top-Left with Bottom-Right)
    Mat tmp = new Mat();
    q0.CopyTo(tmp);
    q3.CopyTo(q0);
    tmp.CopyTo(q3);
    // swap quadrant (Top-Right with Bottom-Left)
    q1.CopyTo(tmp);
    q2.CopyTo(q1);
    tmp.CopyTo(q2);
    // Transform the matrix with float values into a viewable image form (floats between 0 and 1)
    Cv2.Normalize(spectrum, spectrum, 0, 1, NormType.MinMax);
    // Show the result
    Cv2.ImShow("Spectrum Magnitude", spectrum);
    Cv2.WaitKey(0);
    Cv2.DestroyAllWindows();
}
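One step the sample leaves out: after Normalize the spectrum is still CV_32F in [0, 1], which ImShow handles but ImWrite does not save sensibly. A final ConvertTo makes it persistable (sketch; the output name is a placeholder):
    var spectrum8 = new Mat();
    spectrum.ConvertTo(spectrum8, MatType.CV_8U, 255.0, 0); // [0,1] floats -> [0,255] bytes
    Cv2.ImWrite("spectrum.png", spectrum8);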
Example 8: TestImageViewMat
public static void TestImageViewMat()
{
    Mat m = CvInvoke.Imread(EmguAssert.GetFile("box.png"), LoadImageType.AnyColor);
    Mat m2 = new Mat();
    CvInvoke.CvtColor(m, m2, ColorConversion.Gray2Rgb);
    Mat m3 = new Mat();
    m2.ConvertTo(m3, DepthType.Cv16U);
    ImageViewer.Show(m3);
}
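Note that with the default alpha = 1 this widening conversion keeps the numeric values, so m3 spans only 0..255 inside a 0..65535 type and renders very dark. If the goal is to occupy the full 16-bit range, pass a scale factor; a sketch continuing from m2 (65535 / 255 = 257):
    Mat m4 = new Mat();
    m2.ConvertTo(m4, DepthType.Cv16U, 257.0, 0); // 0..255 -> 0..65535
    ImageViewer.Show(m4);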
Example 9: updateBrightnessContrast
private static void updateBrightnessContrast(Mat src, Mat modifiedSrc, int brightness, int contrast)
{
    brightness = brightness - 100;
    contrast = contrast - 100;
    double alpha, beta;
    if (contrast > 0)
    {
        double delta = 127f * contrast / 100f;
        alpha = 255f / (255f - delta * 2);
        beta = alpha * (brightness - delta);
    }
    else
    {
        double delta = -128f * contrast / 100;
        alpha = (256f - delta * 2) / 255f;
        beta = alpha * brightness + delta;
    }
    src.ConvertTo(modifiedSrc, MatType.CV_8UC3, alpha, beta);
}
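Because ConvertTo evaluates dst = src * alpha + beta with saturation, this helper performs the whole brightness/contrast adjustment in a single pass over the image. A usage sketch with hypothetical slider values, where 100 means "no change" on both axes:
    var src = Cv2.ImRead("photo.jpg"); // placeholder input
    var adjusted = new Mat();
    updateBrightnessContrast(src, adjusted, 150, 120); // +50 brightness, +20 contrast
    Cv2.ImShow("adjusted", adjusted);
    Cv2.WaitKey();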
Example 10: TestBOWKmeansTrainer2
public void TestBOWKmeansTrainer2()
{
    Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
    Brisk detector = new Brisk(30, 3, 1.0f);
    VectorOfKeyPoint kpts = new VectorOfKeyPoint();
    Mat descriptors = new Mat();
    detector.DetectAndCompute(box, null, kpts, descriptors, false);
    Mat descriptorsF = new Mat();
    descriptors.ConvertTo(descriptorsF, CvEnum.DepthType.Cv32F);
    //Matrix<float> descriptorsF = descriptors.Convert<float>();
    BOWKMeansTrainer trainer = new BOWKMeansTrainer(100, new MCvTermCriteria(), 3, CvEnum.KMeansInitType.PPCenters);
    trainer.Add(descriptorsF);
    Mat vocabulary = new Mat();
    trainer.Cluster(vocabulary);
    BFMatcher matcher = new BFMatcher(DistanceType.L2);
    BOWImgDescriptorExtractor extractor = new BOWImgDescriptorExtractor(detector, matcher);
    Mat vocabularyByte = new Mat();
    vocabulary.ConvertTo(vocabularyByte, CvEnum.DepthType.Cv8U);
    extractor.SetVocabulary(vocabularyByte);
    Mat descriptors2 = new Mat();
    extractor.Compute(box, kpts, descriptors2);
}
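Two ConvertTo calls do the real glue here: Brisk emits CV_8U binary descriptors, BOWKMeansTrainer's k-means needs CV_32F input, and the extractor wants bytes back for matching. One detail worth knowing when narrowing the vocabulary: ConvertTo saturate-casts with round-to-nearest rather than truncating. A tiny self-contained check (values chosen for illustration):
    Mat centers = new Mat(1, 4, DepthType.Cv32F, 1);
    centers.SetTo(new MCvScalar(127.6));
    Mat centersByte = new Mat();
    centers.ConvertTo(centersByte, DepthType.Cv8U); // each value becomes 128, not 127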
Example 11: BinaryImage
//......... part of the code omitted here .........
            {
                binary1Matrix[rowIndex, columnIndex] = 0;
            }
            if (diffValue < -nt
                || cannyMatrix[rowIndex, columnIndex] == 255)
            {
                binary2Matrix[rowIndex, columnIndex] = 255;
            }
            else
            {
                binary2Matrix[rowIndex, columnIndex] = 0;
            }
        }
    }
    VectorOfVectorOfPoint contoursVector = new VectorOfVectorOfPoint();
    //var connectedComponents = FindBlobs(binary1Matrix.Mat, number);
    CvInvoke.FindContours(binary1Matrix.Mat.Clone(), contoursVector, null, RetrType.List, ChainApproxMethod.ChainApproxNone);
    var minWidth = 0.045 * gray.Width;
    var maxWidth = 0.25 * gray.Width;
    var minHeight = 0.045 * gray.Height;
    var maxHeight = 0.25 * gray.Height;
    Mat detectedDigits = new Mat();
    //(gray.Size, DepthType.Cv32S, 3);
    //color.ConvertTo(colorComponents, DepthType.Cv32S);
    gray.ConvertTo(detectedDigits, DepthType.Cv32S);
    for (int compIndex = 0; compIndex < contoursVector.Size; compIndex++)
    {
        var component = contoursVector[compIndex];
        var subSampleData = await SubSampling(color, component);
        byte[,,] colorComponents = new byte[color.Rows, color.Cols, 3];
        for (int row = 0; row < subSampleData.GetLength(0); row++)
        {
            for (int column = 0; column < subSampleData.GetLength(1); column++)
            {
                var r = subSampleData[row, column, 2];
                var g = subSampleData[row, column, 1];
                var b = subSampleData[row, column, 0];
                if (r > 0)
                {
                    colorComponents[row, column, 2] = color.GetData(row, column)[2];
                    colorComponents[row, column, 1] = color.GetData(row, column)[1];
                    colorComponents[row, column, 0] = color.GetData(row, column)[0];
                }
            }
        }
        var compRectangle = CvInvoke.BoundingRectangle(component);
        var ratio = (double)compRectangle.Width / compRectangle.Height;
        var inversedRatio = (double)1 / ratio;
        using (var colorImage = new Image<Bgr, byte>(subSampleData))
Example 12: FindBlobs
VectorOfVectorOfPoint FindBlobs(Mat binary, int index)
{
    VectorOfVectorOfPoint blobs = new VectorOfVectorOfPoint();
    // Fill the label_image with the blobs
    // 0 - background
    // 1 - unlabelled foreground
    // 2+ - labelled foreground
    Mat label_image = new Mat();
    binary.ConvertTo(label_image, DepthType.Cv8U);
    int label_count = 2; // starts at 2 because 0,1 are used already
    for (int y = 0; y < label_image.Rows; y++)
    {
        for (int x = 0; x < label_image.Cols; x++)
        {
            var val = label_image.GetData(y, x)[0];
            if (val != 255)
            {
                continue;
            }
            System.Drawing.Rectangle rect;
            CvInvoke.FloodFill(label_image, new Mat(), new System.Drawing.Point(x, y), new MCvScalar(label_count), out rect, new MCvScalar(0), new MCvScalar(0), Connectivity.FourConnected, FloodFillType.Default);
            //cv::floodFill(label_image, cv::Point(x,y), label_count, &rect, 0, 0, 4);
            VectorOfPoint blob = new VectorOfPoint();
            for (int i = rect.Y; i < (rect.Y + rect.Height); i++)
            {
                for (int j = rect.X; j < (rect.X + rect.Width); j++)
                {
                    var val2 = label_image.GetData(i, j)[0]; // read the filled pixel itself, not the seed at (y, x)
                    if (val2 != label_count)
                    {
                        continue;
                    }
                    blob.Push(new System.Drawing.Point[] { new System.Drawing.Point(j, i) });
                }
            }
            blobs.Push(blob);
            label_count++;
        }
    }
    label_image.Save("labeled" + index + ".bmp");
    return blobs;
}
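A caveat on the ConvertTo at the top of this example: with labels stored in a CV_8U image, FloodFill cannot write label values beyond 255, so the scheme breaks past roughly 253 blobs. The well-known C++ original of this snippet labels into a floating-point image instead (cv::floodFill accepts 8-bit and floating-point inputs); a hedged sketch of that variant with the same Emgu API:
    // Label into CV_32F so label_count is not capped at 255.
    Mat label_image = new Mat();
    binary.ConvertTo(label_image, DepthType.Cv32F, 1.0 / 255.0, 0); // foreground 255 -> 1.0
    // FloodFill with new MCvScalar(label_count) can then assign 2.0, 3.0, ... without saturating.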