This article collects typical usage examples of the C# Mat.Dispose method. If you have been wondering what Mat.Dispose does in C#, how to call it, or would like to see it used in context, the curated code examples here may help. You can also explore further usage examples of the containing class, Mat.
Eight code examples of Mat.Dispose are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
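A quick note before the examples: in OpenCvSharp (the binding most of the examples below use), Mat wraps native memory and implements IDisposable, so Dispose() releases the underlying buffer deterministically instead of waiting for the garbage collector. Calling it explicitly, as the examples do, is equivalent to wrapping the Mat in a using block. The following minimal sketch shows both patterns; the image path is a placeholder.
// Explicit disposal (the pattern used throughout the examples below).
var image = new Mat(@"..\..\Images\sample.png"); // placeholder path
Cv2.ImShow("Image", image);
Cv2.WaitKey(0);
image.Dispose(); // releases the native buffer immediately

// Equivalent using-statement form: Dispose is called automatically,
// even if an exception is thrown inside the block.
using (var image2 = new Mat(@"..\..\Images\sample.png")) // placeholder path
{
    Cv2.ImShow("Image", image2);
    Cv2.WaitKey(0);
}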
Example 1: useTrainedData
private static void useTrainedData()
{
    var srcImage = new Mat(@"..\..\CarData\CarData\TestImages_Scale\test-1.pgm");
    Cv2.ImShow("Source", srcImage);
    Cv2.WaitKey(1); // do events

    var grayImage = new Mat();
    Cv2.CvtColor(srcImage, grayImage, ColorConversion.BgrToGray);
    Cv2.EqualizeHist(grayImage, grayImage);

    var cascade = new CascadeClassifier(@"..\..\CarsInfo\data\cascade.xml");
    var cars = cascade.DetectMultiScale(
        image: grayImage,
        scaleFactor: 1.1,
        minNeighbors: 2,
        flags: HaarDetectionType.Zero | HaarDetectionType.ScaleImage,
        minSize: new Size(30, 30)
    );
    Console.WriteLine("Detected cars: {0}", cars.Length);

    var rnd = new Random();
    var count = 1;
    foreach (var carRect in cars)
    {
        var detectedFaceImage = new Mat(srcImage, carRect);
        Cv2.ImShow(string.Format("Car {0}", count), detectedFaceImage);
        Cv2.WaitKey(1); // do events

        var color = Scalar.FromRgb(rnd.Next(0, 255), rnd.Next(0, 255), rnd.Next(0, 255));
        Cv2.Rectangle(srcImage, carRect, color, 3);

        var detectedFaceGrayImage = new Mat();
        Cv2.CvtColor(detectedFaceImage, detectedFaceGrayImage, ColorConversion.BgrToGray);
        count++;
    }

    Cv2.ImShow("Haar Detection", srcImage);
    Cv2.WaitKey(1); // do events
    Cv2.WaitKey(0);
    Cv2.DestroyAllWindows();
    srcImage.Dispose();
}
Example 2: Main
static void Main(string[] args)
{
    var img1 = new Mat(@"..\..\Images\left.png", LoadMode.GrayScale);
    Cv2.ImShow("Left", img1);
    Cv2.WaitKey(1); // do events

    var img2 = new Mat(@"..\..\Images\right.png", LoadMode.GrayScale);
    Cv2.ImShow("Right", img2);
    Cv2.WaitKey(1); // do events

    // Detecting keypoints. Available detectors include FastFeatureDetector, StarDetector, SIFT, SURF,
    // ORB, BRISK, MSER, GFTTDetector, DenseFeatureDetector and SimpleBlobDetector.
    // SURF = Speeded Up Robust Features.
    var detector = new SURF(hessianThreshold: 400); // a good default is roughly 300 to 500, depending on the image contrast
    var keypoints1 = detector.Detect(img1);
    var keypoints2 = detector.Detect(img2);

    // Computing descriptors (alternatives include BRIEF and FREAK).
    // BRIEF = Binary Robust Independent Elementary Features.
    var extractor = new BriefDescriptorExtractor();
    var descriptors1 = new Mat();
    var descriptors2 = new Mat();
    extractor.Compute(img1, ref keypoints1, descriptors1);
    extractor.Compute(img2, ref keypoints2, descriptors2);

    // Matching descriptors.
    var matcher = new BFMatcher();
    var matches = matcher.Match(descriptors1, descriptors2);

    // Drawing the results.
    var imgMatches = new Mat();
    Cv2.DrawMatches(img1, keypoints1, img2, keypoints2, matches, imgMatches);
    Cv2.ImShow("Matches", imgMatches);
    Cv2.WaitKey(1); // do events

    Cv2.WaitKey(0);
    Cv2.DestroyAllWindows();
    img1.Dispose();
    img2.Dispose();
}
Example 3: CaliblationUpdate
// Calibration
public void CaliblationUpdate(ShadowPackage proccesedMatPckage)
{
    // Input image (cloned so the source package is left untouched).
    Mat srcImg = proccesedMatPckage.srcMat.Clone();
    this.imgChannles = srcImg.Channels();

    #region
    if (this.fragCutImg == 1)
    {
        this.cutRect = new CutRect(srcImg);
        this.fragCutImg = 0;
        this.imageWidth = srcImg.Width;
        this.imageHeight = srcImg.Height;
    }
    #endregion

    // Get the coordinates for calibration.
    this.backIn_Pt = this.changePt(this._getPtForCalib(0));
    this.backOut_Pt = this.changePt(this._getPtForCalib(1));
    this.floorIn_Pt = this.changePt(this._getPtForCalib(2));
    this.floorOut_Pt = this.changePt(this._getPtForCalib(3));
    this.backOut_Pt = this.changePtRange(this.backOut_Pt);
    this.floorOut_Pt = this.changePtRange(this.floorOut_Pt);

    // back
    #region
    this.backIn_Pt = this.changePtRange(this.backIn_Pt);
    this.backDstImg = this.PerspectiveProject(srcImg, this.backIn_Pt, this.backOut_Pt).Clone();
    this.backDstImg = cutRect.CutImage(this.backDstImg, this.backOut_Pt).Clone();
    #endregion

    // floor
    #region
    this.floorIn_Pt = this.changePtRange(this.floorIn_Pt);
    this.floorDstImg = this.PerspectiveProject(srcImg, this.floorIn_Pt, this.floorOut_Pt).Clone();
    this.floorDstImg = cutRect.CutImage(this.floorDstImg, this.floorOut_Pt).Clone();
    #endregion

    srcImg.Dispose();
}
Example 4: getHandPicturesInRange
public IplImage getHandPicturesInRange(IplImage source)
{
    // Convert to HSV and threshold a hue/saturation/value range into a binary mask.
    IplImage image = source.Clone();
    IplImage gray = Cv.CreateImage(new CvSize(image.Width, image.Height), BitDepth.U8, 1);
    Cv.CvtColor(image, image, ColorConversion.RgbToHsv);
    Cv.Smooth(image, image, SmoothType.Median, 7);
    Scalar s1 = new Scalar(130, 50, 50);
    Scalar s2 = new Scalar(360, 255, 255);
    Cv.InRangeS(image, s1, s2, gray);
    Cv.Threshold(gray, gray, 0, 255, ThresholdType.Binary);
    //Cv.Canny(gray, gray, 100, 200, ApertureSize.Size3);
    //Cv.Laplace(gray, gray, ApertureSize.Size1);

    // Apply the mask to the original image and return the result as an IplImage.
    Mat sourceMat = new Mat(source);
    Mat grayMat = new Mat(gray);
    Mat outputMat = new Mat();
    Cv2.BitwiseAnd(sourceMat, sourceMat, outputMat, grayMat);
    IplImage returnIPL = outputMat.ToIplImage();

    image.Dispose();
    gray.Dispose();
    sourceMat.Dispose();
    grayMat.Dispose();
    outputMat.Dispose();
    return returnIPL;
}
Example 5: getHandPicturesGMG
public IplImage getHandPicturesGMG(IplImage source)
{
    // Extract the foreground with the background subtractor and mask the input frame with it.
    Mat frame = new Mat(source), foregroundMask = new Mat(), output = new Mat();
    backgroundSubtractor.Run(frame, foregroundMask);
    Cv2.BitwiseAnd(frame, frame, output, foregroundMask);
    IplImage returnIPL = output.ToIplImage();

    frame.Dispose();
    foregroundMask.Dispose();
    output.Dispose();
    return returnIPL;
}
Example 6: estimatePosition
/// <summary>
/// Estimates the position.
/// </summary>
/// <param name="detectedMarkers">Detected markers.</param>
void estimatePosition (List<Marker> detectedMarkers)
{
    for (int i = 0; i < detectedMarkers.Count; i++) {
        Marker m = detectedMarkers [i];

        Mat Rvec = new Mat ();
        Mat Tvec = new Mat ();
        Mat raux = new Mat ();
        Mat taux = new Mat ();
        Calib3d.solvePnP (m_markerCorners3d, new MatOfPoint2f (m.points.toArray ()), camMatrix, distCoeff, raux, taux);
        raux.convertTo (Rvec, CvType.CV_32F);
        taux.convertTo (Tvec, CvType.CV_32F);

        Mat rotMat = new Mat (3, 3, CvType.CV_64FC1);
        Calib3d.Rodrigues (Rvec, rotMat);

        // Copy the 3x3 rotation and the translation into the marker's 4x4 transformation matrix.
        m.transformation.SetRow (0, new Vector4 ((float)rotMat.get (0, 0) [0], (float)rotMat.get (0, 1) [0], (float)rotMat.get (0, 2) [0], (float)Tvec.get (0, 0) [0]));
        m.transformation.SetRow (1, new Vector4 ((float)rotMat.get (1, 0) [0], (float)rotMat.get (1, 1) [0], (float)rotMat.get (1, 2) [0], (float)Tvec.get (1, 0) [0]));
        m.transformation.SetRow (2, new Vector4 ((float)rotMat.get (2, 0) [0], (float)rotMat.get (2, 1) [0], (float)rotMat.get (2, 2) [0], (float)Tvec.get (2, 0) [0]));
        m.transformation.SetRow (3, new Vector4 (0, 0, 0, 1));
        // Debug.Log ("m.transformation " + m.transformation.ToString ());

        Rvec.Dispose ();
        Tvec.Dispose ();
        raux.Dispose ();
        taux.Dispose ();
        rotMat.Dispose ();
    }
}
Example 7: getMarkerId
/// <summary>
/// Gets the marker identifier.
/// </summary>
/// <returns>The marker identifier.</returns>
/// <param name="markerImage">Marker image.</param>
/// <param name="nRotations">N rotations.</param>
public static int getMarkerId (Mat markerImage, MatOfInt nRotations, byte[,] markerDesign)
{
    Mat grey = markerImage;

    // Threshold the image.
    Imgproc.threshold (grey, grey, 125, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);

    // Markers are divided into 7x7 regions, of which the inner 5x5 carries the marker info;
    // the external border should be entirely black.
    int size = markerDesign.GetLength (0);
    int cellSize = markerImage.rows () / (size + 2);
    for (int y = 0; y < (size + 2); y++) {
        int inc = size + 1;
        if (y == 0 || y == (size + 1))
            inc = 1; // for the first and last row, check the whole border
        for (int x = 0; x < (size + 2); x += inc) {
            int cellX = x * cellSize;
            int cellY = y * cellSize;
            Mat cell = new Mat (grey, new OpenCVForUnity.Rect (cellX, cellY, cellSize, cellSize));
            int nZ = Core.countNonZero (cell);
            cell.Dispose ();
            if (nZ > (cellSize * cellSize) / 2) {
                return -1; // cannot be a marker because a border cell is not black
            }
        }
    }

    Mat bitMatrix = Mat.zeros (size, size, CvType.CV_8UC1);

    // Get the information (for each inner square, determine whether it is black or white).
    for (int y = 0; y < size; y++) {
        for (int x = 0; x < size; x++) {
            int cellX = (x + 1) * cellSize;
            int cellY = (y + 1) * cellSize;
            Mat cell = new Mat (grey, new OpenCVForUnity.Rect (cellX, cellY, cellSize, cellSize));
            int nZ = Core.countNonZero (cell);
            if (nZ > (cellSize * cellSize) / 2)
                bitMatrix.put (y, x, new byte[]{ 1 });
            //bitMatrix.at<uchar> (y, x) = 1;
            cell.Dispose ();
        }
    }
    // Debug.Log ("bitMatrix " + bitMatrix.dump());

    // Check all possible rotations.
    Mat[] rotations = new Mat[4];
    for (int i = 0; i < rotations.Length; i++) {
        rotations [i] = new Mat ();
    }
    int[] distances = new int[4];
    rotations [0] = bitMatrix;
    distances [0] = hammDistMarker (rotations [0], markerDesign);
    int first = distances [0];
    int second = 0;
    for (int i = 1; i < 4; i++) {
        // Get the Hamming distance to the nearest possible word.
        rotations [i] = rotate (rotations [i - 1]);
        distances [i] = hammDistMarker (rotations [i], markerDesign);
        if (distances [i] < first) {
            first = distances [i];
            second = i;
        }
    }
    // Debug.Log ("first " + first);
    nRotations.fromArray (second);
    if (first == 0) {
        int id = mat2id (rotations [second]);
//......... the rest of this example is omitted .........
Example 8: watershedExample
//......... the beginning of this example is omitted .........
        var key = Cv2.WaitKey(0);
        if ((char)key == 27) // ESC
        {
            break;
        }

        if ((char)key == 'r') // Reset
        {
            markerMask = new Mat(markerMask.Size(), markerMask.Type(), s: Scalar.All(0));
            src.CopyTo(srcCopy);
            sourceWindow.Image = srcCopy;
        }

        if ((char)key == 'w' || (char)key == ' ') // Apply watershed
        {
            Point[][] contours; //vector<vector<Point>> contours;
            HiearchyIndex[] hierarchyIndexes; //vector<Vec4i> hierarchy;
            Cv2.FindContours(
                markerMask,
                out contours,
                out hierarchyIndexes,
                mode: ContourRetrieval.CComp,
                method: ContourChain.ApproxSimple);
            if (contours.Length == 0)
            {
                continue;
            }

            // Seed the marker image: fill each contour with a distinct component index.
            var markers = new Mat(markerMask.Size(), MatType.CV_32S, s: Scalar.All(0));
            var componentCount = 0;
            var contourIndex = 0;
            while ((contourIndex >= 0))
            {
                Cv2.DrawContours(
                    markers,
                    contours,
                    contourIndex,
                    color: Scalar.All(componentCount + 1),
                    thickness: -1,
                    lineType: LineType.Link8,
                    hierarchy: hierarchyIndexes,
                    maxLevel: int.MaxValue);
                componentCount++;
                contourIndex = hierarchyIndexes[contourIndex].Next;
            }
            if (componentCount == 0)
            {
                continue;
            }

            // Build a random color table, one color per component.
            var colorTable = new List<Vec3b>();
            for (var i = 0; i < componentCount; i++)
            {
                var b = rnd.Next(0, 255); //Cv2.TheRNG().Uniform(0, 255);
                var g = rnd.Next(0, 255); //Cv2.TheRNG().Uniform(0, 255);
                var r = rnd.Next(0, 255); //Cv2.TheRNG().Uniform(0, 255);
                colorTable.Add(new Vec3b((byte)b, (byte)g, (byte)r));
            }

            Cv2.Watershed(src, markers);

            // Paint the watershed image.
            var watershedImage = new Mat(markers.Size(), MatType.CV_8UC3);
            for (var i = 0; i < markers.Rows; i++)
            {
                for (var j = 0; j < markers.Cols; j++)
                {
                    var idx = markers.At<int>(i, j);
                    if (idx == -1)
                    {
                        watershedImage.Set(i, j, new Vec3b(255, 255, 255)); // watershed boundary
                    }
                    else if (idx <= 0 || idx > componentCount)
                    {
                        watershedImage.Set(i, j, new Vec3b(0, 0, 0));
                    }
                    else
                    {
                        watershedImage.Set(i, j, colorTable[idx - 1]);
                    }
                }
            }

            watershedImage = watershedImage * 0.5 + imgGray * 0.5;
            Cv2.ImShow("Watershed Transform", watershedImage);
            Cv2.WaitKey(1); // do events
        }
    }

    sourceWindow.Dispose();
    Cv2.DestroyAllWindows();
    src.Dispose();
}