This article collects typical usage examples of the Mat class in C#. If you are wondering what the Mat class is for, how to use it, or what real-world Mat code looks like, the hand-picked class code examples below may help. A total of 15 Mat code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
Example 1: StopSignDetector
public StopSignDetector(IInputArray stopSignModel)
{
    _detector = new SURF(500);
    using (Mat redMask = new Mat())
    {
        GetRedPixelMask(stopSignModel, redMask);
        _modelKeypoints = new VectorOfKeyPoint();
        _modelDescriptors = new Mat();
        _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
        if (_modelKeypoints.Size == 0)
            throw new Exception("No image feature has been found in the stop sign model");
    }
    _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
    _modelDescriptorMatcher.Add(_modelDescriptors);
    _octagon = new VectorOfPoint(
        new Point[]
        {
            new Point(1, 0),
            new Point(2, 0),
            new Point(3, 1),
            new Point(3, 2),
            new Point(2, 3),
            new Point(1, 3),
            new Point(0, 2),
            new Point(0, 1)
        });
}
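The constructor only builds the model side (SURF keypoints, descriptors, and a BFMatcher). As a purely illustrative companion, the sketch below shows one way such a matcher is commonly queried against a scene image; the method name CountGoodMatches, the 0.75 ratio threshold, and the Emgu CV 3.x calls are assumptions, not part of the original class.
// Hypothetical helper (not in the original class): count distinctive matches between
// a scene image and the stop-sign model prepared in the constructor above.
private int CountGoodMatches(IInputArray scene)
{
    using (Mat sceneDescriptors = new Mat())
    using (VectorOfKeyPoint sceneKeypoints = new VectorOfKeyPoint())
    using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
    {
        // Extract SURF features from the scene with the same detector used for the model
        _detector.DetectAndCompute(scene, null, sceneKeypoints, sceneDescriptors, false);
        // k-nearest-neighbour match against the model descriptors added in the constructor
        _modelDescriptorMatcher.KnnMatch(sceneDescriptors, matches, 2, null);
        int good = 0;
        for (int i = 0; i < matches.Size; i++)
        {
            // Lowe's ratio test: keep a match only if it is clearly better than the runner-up
            if (matches[i].Size >= 2 && matches[i][0].Distance < 0.75 * matches[i][1].Distance)
                good++;
        }
        return good;
    }
}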
Example 2: Start
// Use this for initialization
void Start()
{
    String[] textureNames = new string[] { "stitch1", "stitch2", "stitch3", "stitch4" };
    Mat[] imgs = new Mat[textureNames.Length];
    Mat tmp = new Mat();
    for (int i = 0; i < textureNames.Length; i++) {
        Texture2D tex = Resources.Load<Texture2D>(textureNames[i]);
        imgs[i] = new Mat();
        TextureConvert.Texture2dToOutputArray(tex, tmp);
        CvInvoke.Flip(tmp, tmp, FlipType.Vertical);
        CvInvoke.CvtColor(tmp, imgs[i], ColorConversion.Bgra2Bgr);
        if (imgs[i].IsEmpty)
            Debug.Log("Image " + i + " is empty");
        else
            Debug.Log("Image " + i + " is " + imgs[i].NumberOfChannels + " channels " + imgs[i].Width + "x" + imgs[i].Height);
    }
    Emgu.CV.Stitching.Stitcher stitcher = new Emgu.CV.Stitching.Stitcher(false);
    Mat result = new Mat();
    using (VectorOfMat vms = new VectorOfMat(imgs))
        stitcher.Stitch(vms, result);
    //CvInvoke.Flip(result, result, FlipType.Vertical);
    Texture2D texture = TextureConvert.InputArrayToTexture2D(result, FlipType.Vertical);
    this.GetComponent<GUITexture>().texture = texture;
    Size s = result.Size;
    this.GetComponent<GUITexture>().pixelInset = new Rect(-s.Width / 2, -s.Height / 2, s.Width, s.Height);
}
Example 3: MatchBySurf
private void MatchBySurf(Mat src1, Mat src2)
{
    var gray1 = new Mat();
    var gray2 = new Mat();
    Cv2.CvtColor(src1, gray1, ColorConversion.BgrToGray);
    Cv2.CvtColor(src2, gray2, ColorConversion.BgrToGray);
    var surf = new SURF(500, 4, 2, true);
    // Detect the keypoints and generate their descriptors using SURF
    KeyPoint[] keypoints1, keypoints2;
    var descriptors1 = new MatOfFloat();
    var descriptors2 = new MatOfFloat();
    surf.Run(gray1, null, out keypoints1, descriptors1);
    surf.Run(gray2, null, out keypoints2, descriptors2);
    // Match descriptor vectors
    var bfMatcher = new BFMatcher(NormType.L2, false);
    var flannMatcher = new FlannBasedMatcher();
    DMatch[] bfMatches = bfMatcher.Match(descriptors1, descriptors2);
    DMatch[] flannMatches = flannMatcher.Match(descriptors1, descriptors2);
    // Draw matches
    var bfView = new Mat();
    Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, bfMatches, bfView);
    var flannView = new Mat();
    Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, flannMatches, flannView);
    using (new Window("SURF matching (by BFMatcher)", WindowMode.AutoSize, bfView))
    using (new Window("SURF matching (by FlannBasedMatcher)", WindowMode.AutoSize, flannView))
    {
        Cv2.WaitKey();
    }
}
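Match() keeps only the single best candidate per descriptor. When the raw matches are noisy, a common refinement is a k-nearest-neighbour query followed by Lowe's ratio test. A minimal sketch under the same OpenCvSharp API as above; it would slot into MatchBySurf right after the Match calls, and the 0.75 threshold is an illustrative value:
// Query the two best candidates for each descriptor and keep only distinctive matches
DMatch[][] knnMatches = bfMatcher.KnnMatch(descriptors1, descriptors2, 2);
var goodMatches = new List<DMatch>();
foreach (DMatch[] m in knnMatches)
{
    // Ratio test: the best match must be clearly better than the second best
    if (m.Length >= 2 && m[0].Distance < 0.75f * m[1].Distance)
        goodMatches.Add(m[0]);
}
var ratioView = new Mat();
Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, goodMatches, ratioView);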
Example 4: Run
public void Run()
{
    var dst = new Mat(FilePath.Lenna, LoadMode.Color);
    var gray = new Mat(FilePath.Lenna, LoadMode.GrayScale);
    StarDetector detector = new StarDetector(45);
    KeyPoint[] keypoints = detector.Run(gray);
    if (keypoints != null)
    {
        var color = new Scalar(0, 255, 0);
        foreach (KeyPoint kpt in keypoints)
        {
            float r = kpt.Size / 2;
            Cv2.Circle(dst, kpt.Pt, (int)r, color, 1, LineType.Link8, 0);
            Cv2.Line(dst,
                new Point2f(kpt.Pt.X + r, kpt.Pt.Y + r),
                new Point2f(kpt.Pt.X - r, kpt.Pt.Y - r),
                color, 1, LineType.Link8, 0);
            Cv2.Line(dst,
                new Point2f(kpt.Pt.X - r, kpt.Pt.Y + r),
                new Point2f(kpt.Pt.X + r, kpt.Pt.Y - r),
                color, 1, LineType.Link8, 0);
        }
    }
    using (new Window("StarDetector features", dst))
    {
        Cv2.WaitKey();
    }
}
Example 5: Start
// Use this for initialization
void Start()
{
    Texture2D imgTexture = Resources.Load("chessboard") as Texture2D;
    Mat imgMat = new Mat(imgTexture.height, imgTexture.width, CvType.CV_8UC3);
    Utils.texture2DToMat(imgTexture, imgMat);
    Debug.Log("imgMat dst ToString " + imgMat.ToString());
    Mat grayMat = new Mat();
    Imgproc.cvtColor(imgMat, grayMat, Imgproc.COLOR_RGB2GRAY);
    Imgproc.Canny(grayMat, grayMat, 50, 200);
    Mat lines = new Mat();
    Imgproc.HoughLinesP(grayMat, lines, 1, Mathf.PI / 180, 50, 50, 10);
    // Debug.Log("lines toString " + lines.ToString());
    // Debug.Log("lines dump" + lines.dump());
    int[] linesArray = new int[lines.cols() * lines.rows() * lines.channels()];
    lines.get(0, 0, linesArray);
    for (int i = 0; i < linesArray.Length; i = i + 4) {
        Core.line(imgMat, new Point(linesArray[i + 0], linesArray[i + 1]), new Point(linesArray[i + 2], linesArray[i + 3]), new Scalar(255, 0, 0), 2);
    }
    Texture2D texture = new Texture2D(imgMat.cols(), imgMat.rows(), TextureFormat.RGBA32, false);
    Utils.matToTexture2D(imgMat, texture);
    gameObject.GetComponent<Renderer>().material.mainTexture = texture;
}
Example 6: Start
// Use this for initialization
void Start()
{
    // Read the left and right images
    Texture2D texLeft = Resources.Load("tsukuba_l") as Texture2D;
    Texture2D texRight = Resources.Load("tsukuba_r") as Texture2D;
    Mat imgLeft = new Mat(texLeft.height, texLeft.width, CvType.CV_8UC1);
    Mat imgRight = new Mat(texRight.height, texRight.width, CvType.CV_8UC1);
    Utils.texture2DToMat(texLeft, imgLeft);
    Utils.texture2DToMat(texRight, imgRight);
    // or
    // Mat imgLeft = Imgcodecs.imread(Utils.getFilePath("tsukuba_l.png"), Imgcodecs.IMREAD_GRAYSCALE);
    // Mat imgRight = Imgcodecs.imread(Utils.getFilePath("tsukuba_r.png"), Imgcodecs.IMREAD_GRAYSCALE);
    Mat imgDisparity16S = new Mat(imgLeft.rows(), imgLeft.cols(), CvType.CV_16S);
    Mat imgDisparity8U = new Mat(imgLeft.rows(), imgLeft.cols(), CvType.CV_8UC1);
    if (imgLeft.empty() || imgRight.empty()) {
        Debug.Log("Error reading images ");
    }
    StereoBM sbm = StereoBM.create(16, 15);
    sbm.compute(imgLeft, imgRight, imgDisparity16S);
    // normalize to CvType.CV_8U
    Core.normalize(imgDisparity16S, imgDisparity8U, 0, 255, Core.NORM_MINMAX, CvType.CV_8U);
    Texture2D texture = new Texture2D(imgDisparity8U.cols(), imgDisparity8U.rows(), TextureFormat.RGBA32, false);
    Utils.matToTexture2D(imgDisparity8U, texture);
    gameObject.GetComponent<Renderer>().material.mainTexture = texture;
}
Example 7: Run
public void Run()
{
    var capture = new VideoCapture();
    capture.Set(CaptureProperty.FrameWidth, 640);
    capture.Set(CaptureProperty.FrameHeight, 480);
    capture.Open(-1);
    if (!capture.IsOpened())
        throw new Exception("capture initialization failed");
    var fs = FrameSource.CreateCameraSource(-1);
    var sr = SuperResolution.CreateBTVL1();
    sr.SetInput(fs);
    using (var normalWindow = new Window("normal"))
    using (var srWindow = new Window("super resolution"))
    {
        var normalFrame = new Mat();
        var srFrame = new Mat();
        while (true)
        {
            capture.Read(normalFrame);
            sr.NextFrame(srFrame);
            if (normalFrame.Empty() || srFrame.Empty())
                break;
            normalWindow.ShowImage(normalFrame);
            srWindow.ShowImage(srFrame);
            Cv2.WaitKey(100);
        }
    }
}
Example 8: ConvertToGrayScale
public static Mat ConvertToGrayScale(Mat mat)
{
    Mat grayMat = new Mat();
    Cv2.CvtColor(mat, grayMat, ColorConversion.RgbToGray);
    return grayMat;
}
Example 9: HDR
private static void HDR()
{
    var hdr = CalibrateDebevec.Create();
    Mat[] images = new Mat[3];
    images[0] = Cv2.ImRead(@"data\lenna.png", ImreadModes.AnyColor);
    images[1] = Cv2.ImRead(@"data\lenna.png", ImreadModes.AnyColor);
    images[2] = Cv2.ImRead(@"data\lenna.png", ImreadModes.AnyColor);
    float[] speeds = new float[3];
    speeds[0] = 1;
    speeds[1] = 1;
    speeds[2] = 1;
    Mat dst = new Mat();
    // CalibrateDebevec recovers the camera response curve: dst holds one response value
    // per intensity level and channel, not a merged HDR image
    hdr.Process(images, dst, speeds);
    dst.ToString();
    for (int i = 0; i < Math.Max(dst.Rows, dst.Cols); i++)
    {
        Console.WriteLine(dst.At<float>(i));
    }
}
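Note that CalibrateDebevec only recovers the camera response curve; building an actual HDR radiance map takes a separate merge step. A hedged sketch of that follow-up, reusing images, speeds and dst from the method above and assuming OpenCvSharp's MergeDebevec wrapper:
// Merge the exposures into a 32-bit floating point radiance map, using the recovered response curve
var merge = MergeDebevec.Create();
Mat hdrImage = new Mat();
merge.Process(images, hdrImage, speeds, dst);
// hdrImage holds linear radiance values; tone map or rescale it before display
Cv2.ImWrite("hdr_preview.hdr", hdrImage);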
Example 10: ToBitmapGrayScale
public void ToBitmapGrayScale()
{
    Mat img = new Mat(FilePath.Lenna511, LoadMode.GrayScale); // width % 4 != 0
    Bitmap bitmap = BitmapConverter2.ToBitmap(img);
    // Bitmap bitmap = img.ToBitmap();
    using (var form = new Form())
    using (var pb = new PictureBox())
    {
        pb.Image = bitmap;
        var size = new System.Drawing.Size(bitmap.Width, bitmap.Height);
        pb.ClientSize = size;
        form.ClientSize = size;
        form.Controls.Add(pb);
        form.KeyPreview = true;
        form.KeyDown += (sender, args) =>
        {
            if (args.KeyCode.HasFlag(Keys.Enter))
                ((Form)sender).Close();
        };
        form.Text = "Grayscale Mat to Bitmap Test";
        form.ShowDialog();
    }
}
Example 11: usingCppInterface1
private static void usingCppInterface1()
{
    // Cv2.ImRead
    using (var src = new Mat(@"..\..\Images\Penguin.Png", LoadMode.AnyDepth | LoadMode.AnyColor))
    using (var dst = new Mat())
    {
        src.CopyTo(dst);
        for (var y = 0; y < src.Height; y++)
        {
            for (var x = 0; x < src.Width; x++)
            {
                var pixel = src.Get<Vec3b>(y, x);
                var newPixel = new Vec3b
                {
                    Item0 = (byte)(255 - pixel.Item0), // B
                    Item1 = (byte)(255 - pixel.Item1), // G
                    Item2 = (byte)(255 - pixel.Item2)  // R
                };
                dst.Set(y, x, newPixel);
            }
        }
        // [Cpp] Accessing Pixel
        // https://github.com/shimat/opencvsharp/wiki/%5BCpp%5D-Accessing-Pixel
        //Cv2.NamedWindow();
        //Cv2.ImShow();
        using (new Window("C++ Interface: Src", image: src))
        using (new Window("C++ Interface: Dst", image: dst))
        {
            Cv2.WaitKey(0);
        }
    }
}
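The nested loops above invert the image pixel by pixel to demonstrate Get/Set access. For comparison, the same inversion can be done in a single call; a brief sketch using the same OpenCvSharp API and input path as the example:
// Equivalent to the per-pixel inversion above, but done in one vectorized call
using (var src = new Mat(@"..\..\Images\Penguin.Png", LoadMode.AnyDepth | LoadMode.AnyColor))
using (var dst = new Mat())
{
    Cv2.BitwiseNot(src, dst);   // dst = 255 - src for every channel
    using (new Window("Inverted (BitwiseNot)", image: dst))
    {
        Cv2.WaitKey(0);
    }
}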
Example 12: Update
// Update is called once per frame
void Update() {
    cap.Read(frame);
    if (!frame.Empty()) {
        // assume this part of the frame contains only background
        smoothed_img = frame.Blur(new Size(5, 5));
        frame_hsv = frame.CvtColor(ColorConversionCodes.BGR2HSV);
        Scalar lb = new Scalar(0, 0, 50);
        Scalar ub = new Scalar(180, 70, 180);
        // NOTE: assumed step. The original snippet defines lb/ub but never fills `thresh`;
        // an InRange threshold on the HSV frame is the likely intent.
        Cv2.InRange(frame_hsv, lb, ub, thresh);
        Mat disc = Cv2.GetStructuringElement(MorphShapes.Ellipse, new Size(7, 7));
        Cv2.MorphologyEx(thresh, thresh, MorphTypes.Close, disc, null, 3);
        contours = Cv2.FindContoursAsMat(thresh, RetrievalModes.List, ContourApproximationModes.ApproxSimple);
        mask = new Mat(thresh.Size(), thresh.Type(), Scalar.All(0));
        Cv2.Merge(new Mat[] { mask, mask, mask }, mask);
        Cv2.BitwiseAnd(mask, frame, mask);
        //Cv2.Merge(new Mat[]{frame_backproj,frame_backproj,frame_backproj},frame_backproj);
        tex.LoadImage(smoothed_img.ToBytes(".png", new int[] { 0 }));
    }
}
Example 13: ProcessImage
private void ProcessImage(IInputOutputArray image)
{
    Stopwatch watch = Stopwatch.StartNew(); // time the detection process
    List<IInputOutputArray> licensePlateImagesList = new List<IInputOutputArray>();
    List<IInputOutputArray> filteredLicensePlateImagesList = new List<IInputOutputArray>();
    List<RotatedRect> licenseBoxList = new List<RotatedRect>();
    List<string> words = _licensePlateDetector.DetectLicensePlate(
        image,
        licensePlateImagesList,
        filteredLicensePlateImagesList,
        licenseBoxList);
    watch.Stop(); // stop the timer
    processTimeLabel.Text = String.Format("License Plate Recognition time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);
    panel1.Controls.Clear();
    Point startPoint = new Point(10, 10);
    for (int i = 0; i < words.Count; i++)
    {
        Mat dest = new Mat();
        CvInvoke.VConcat(licensePlateImagesList[i], filteredLicensePlateImagesList[i], dest);
        AddLabelAndImage(
            ref startPoint,
            String.Format("License: {0}", words[i]),
            dest);
        PointF[] verticesF = licenseBoxList[i].GetVertices();
        Point[] vertices = Array.ConvertAll(verticesF, Point.Round);
        using (VectorOfPoint pts = new VectorOfPoint(vertices))
            CvInvoke.Polylines(image, pts, true, new Bgr(Color.Red).MCvScalar, 2);
    }
}
Example 14: hammDistMarker
/// <summary>
/// Computes the Hamming distance between the detected marker bits and the marker design.
/// </summary>
/// <returns>The Hamming distance.</returns>
/// <param name="bits">Mat holding the binarized marker cells, one byte per cell.</param>
/// <param name="markerDesign">Reference marker design to compare against.</param>
public static int hammDistMarker(Mat bits, byte[,] markerDesign)
{
    int dist = 0;
    int size = markerDesign.GetLength(0);
    byte[] b = new byte[size * size];
    bits.get(0, 0, b);
    for (int y = 0; y < size; y++) {
        int sum = 0;
        for (int x = 0; x < size; x++) {
            sum += (b[y * size + x] == markerDesign[y, x]) ? 0 : 1;
        }
        dist += sum;
    }
    return dist;
}
Example 15: DetectFace
/// <summary>
/// Detects faces in a sample image and draws an ellipse around each detection.
/// </summary>
/// <param name="cascade">The Haar cascade classifier used for detection.</param>
/// <returns>A copy of the source image with the detected faces marked.</returns>
private Mat DetectFace(CascadeClassifier cascade)
{
    Mat result;
    using (var src = new Mat(FilePath.Image.Yalta, LoadMode.Color))
    using (var gray = new Mat())
    {
        result = src.Clone();
        Cv2.CvtColor(src, gray, ColorConversion.BgrToGray, 0);
        // Detect faces
        Rect[] faces = cascade.DetectMultiScale(
            gray, 1.08, 2, HaarDetectionType.ScaleImage, new Size(30, 30));
        // Render all detected faces
        foreach (Rect face in faces)
        {
            var center = new Point
            {
                X = (int)(face.X + face.Width * 0.5),
                Y = (int)(face.Y + face.Height * 0.5)
            };
            var axes = new Size
            {
                Width = (int)(face.Width * 0.5),
                Height = (int)(face.Height * 0.5)
            };
            Cv2.Ellipse(result, center, axes, 0, 0, 360, new Scalar(255, 0, 255), 4);
        }
    }
    return result;
}
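A hedged usage sketch for the method above; the cascade file name is a placeholder, and the Window/WaitKey calls mirror the other OpenCvSharp examples in this article:
// Hypothetical caller: load a Haar cascade, run the detection, and display the result
using (var cascade = new CascadeClassifier("haarcascade_frontalface_alt.xml"))
using (Mat annotated = DetectFace(cascade))
using (new Window("Face detection", annotated))
{
    Cv2.WaitKey(0);
}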