

C# Mat.ToImage Method Code Examples

This article collects typical usage examples of the C# Mat.ToImage method, gathered from open-source projects. If you are wondering what Mat.ToImage does, how to call it, or simply want to see it in real code, the curated examples below should help. You can also explore further usage examples of the Mat class it belongs to.


Four code examples of the Mat.ToImage method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
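Before the examples, a minimal self-contained sketch may help orient you (an illustration assuming Emgu CV 3.x; "input.jpg" is a hypothetical placeholder, not taken from the examples below): Mat.ToImage converts an untyped Mat into a strongly typed Image&lt;TColor, TDepth&gt;, performing any needed color/depth conversion, after which the generic image API (pixel indexing, Draw, Copy, ROI) becomes available.

        using Emgu.CV;
        using Emgu.CV.CvEnum;
        using Emgu.CV.Structure;

        class ToImageDemo
        {
            static void Main()
            {
                //load any image into an untyped Mat ("input.jpg" is a hypothetical file)
                using (Mat mat = new Mat("input.jpg", LoadImageType.Color))
                //convert to a typed image; Bgr/byte conversion happens as needed
                using (Image<Bgr, byte> img = mat.ToImage<Bgr, byte>())
                {
                    //the typed image exposes direct pixel access
                    Bgr topLeft = img[0, 0];
                    System.Console.WriteLine(topLeft);
                }
            }
        }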

Example 1: TestCodeBookBGModel

      /*
      public void TestCodeBookBGModel()
      {
         using (Capture capture = new Capture())
         using (BGCodeBookModel<Bgr> model = new BGCodeBookModel<Bgr>())
         {
            ImageViewer viewer = new ImageViewer();
            Image<Gray, byte> fgMask = capture.QueryFrame().Convert<Gray, Byte>();

            Application.Idle += delegate(Object sender, EventArgs args)
            {
               Mat frame = capture.QueryFrame();
               model.Apply(frame);
               viewer.Image = model.ForegroundMask; 
            };
            viewer.ShowDialog();
         }
      }

      public void TestBlobTracking()
      {
         MCvFGDStatModelParams fgparam = new MCvFGDStatModelParams();
         fgparam.alpha1 = 0.1f;
         fgparam.alpha2 = 0.005f;
         fgparam.alpha3 = 0.1f;
         fgparam.delta = 2;
         fgparam.is_obj_without_holes = 1;
         fgparam.Lc = 32;
         fgparam.Lcc = 16;
         fgparam.minArea = 15;
         fgparam.N1c = 15;
         fgparam.N1cc = 25;
         fgparam.N2c = 25;
         fgparam.N2cc = 35;
         fgparam.perform_morphing = 0;
         fgparam.T = 0.9f;

         BlobTrackerAutoParam<Bgr> param = new BlobTrackerAutoParam<Bgr>();
         param.BlobDetector = new BlobDetector(Emgu.CV.CvEnum.BlobDetectorType.CC);
         param.FGDetector = new FGDetector<Bgr>(Emgu.CV.CvEnum.ForgroundDetectorType.Fgd, fgparam);
         param.BlobTracker = new BlobTracker(Emgu.CV.CvEnum.BLOBTRACKER_TYPE.MSFG);
         param.FGTrainFrames = 10;
         BlobTrackerAuto<Bgr> tracker = new BlobTrackerAuto<Bgr>(param);

         //MCvFont font = new MCvFont(Emgu.CV.CvEnum.FontFace.HersheySimplex, 1.0, 1.0);

         using(ImageViewer viewer = new ImageViewer())
         using (Capture capture = new Capture())
         {
            capture.ImageGrabbed += delegate(object sender, EventArgs e)
            {
               tracker.Process(capture.RetrieveBgrFrame());
               
               //Image<Bgr, Byte> img = capture.RetrieveBgrFrame();

               Image<Bgr, Byte> img = tracker.ForegroundMask.Convert<Bgr, Byte>();
               foreach (MCvBlob blob in tracker)
               {
                  img.Draw((Rectangle)blob, new Bgr(255.0, 255.0, 255.0), 2);
                  img.Draw(blob.ID.ToString(), Point.Round(blob.Center), CvEnum.FontFace.HersheySimplex, 1.0, new Bgr(255.0, 255.0, 255.0));
               }
               viewer.Image = img;
            };
            capture.Start();
            viewer.ShowDialog();
         }
      }*/
      
      public void TestCvBlob()
      {
         //MCvFont font = new MCvFont(Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, 0.5);
         using (CvTracks tracks = new CvTracks())
         using (ImageViewer viewer = new ImageViewer())
         using (Capture capture = new Capture())
         using (Mat fgMask = new Mat())
         {
            //BGStatModel<Bgr> bgModel = new BGStatModel<Bgr>(capture.QueryFrame(), Emgu.CV.CvEnum.BG_STAT_TYPE.GAUSSIAN_BG_MODEL);
            BackgroundSubtractorMOG2 bgModel = new BackgroundSubtractorMOG2(0, 0, true);
            //BackgroundSubstractorMOG bgModel = new BackgroundSubstractorMOG(0, 0, 0, 0);

            capture.ImageGrabbed += delegate(object sender, EventArgs e)
            {
               Mat frame = new Mat();
               capture.Retrieve(frame);
               bgModel.Apply(frame, fgMask);

               using (CvBlobDetector detector = new CvBlobDetector())
               using (CvBlobs blobs = new CvBlobs())
               {
                  detector.Detect(fgMask.ToImage<Gray, Byte>(), blobs);
                  blobs.FilterByArea(100, int.MaxValue);

                  tracks.Update(blobs, 20.0, 10, 0);

                  Image<Bgr, Byte> result = new Image<Bgr, byte>(frame.Size);

                  using (Image<Gray, Byte> blobMask = detector.DrawBlobsMask(blobs))
                  {
                     frame.CopyTo(result, blobMask);
                  }
//......... part of the code omitted here .........
Developer: Warren-GH, Project: emgucv, Lines: 101, Source: Class1.cs
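In Example 1, the foreground mask is handed to the blob detector as fgMask.ToImage&lt;Gray, Byte&gt;(). Here is a condensed sketch of just that conversion step (assuming Emgu CV 3.x; "mask.png" is a hypothetical stand-in for the subtractor output): CvBlobDetector.Detect wants a single-channel 8-bit image, and ToImage&lt;Gray, byte&gt; yields one even when the source Mat has a different depth or channel count.

        using Emgu.CV;
        using Emgu.CV.Cvb;
        using Emgu.CV.CvEnum;
        using Emgu.CV.Structure;

        class BlobsFromMask
        {
            static void Main()
            {
                //"mask.png" is a hypothetical foreground mask written to disk
                using (Mat fgMask = new Mat("mask.png", LoadImageType.Grayscale))
                using (CvBlobDetector detector = new CvBlobDetector())
                using (CvBlobs blobs = new CvBlobs())
                {
                    //Detect expects Image<Gray, byte>; ToImage guarantees that format
                    detector.Detect(fgMask.ToImage<Gray, byte>(), blobs);
                    blobs.FilterByArea(100, int.MaxValue);
                    System.Console.WriteLine("Blobs found: " + blobs.Count);
                }
            }
        }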

Example 2: ImageGrabbedHandler

        public override void ImageGrabbedHandler(object sender, EventArgs e)
        {
            using (var frame = new Mat())
            {
                CameraCapture.Retrieve(frame);
                var inputImage = frame.ToImage<Bgr, byte>();

                if (radTrackingApi.Checked)
                {
                    inputImage = DoTrackingApi(frame, inputImage);
                }
                else if (radCamshift.Checked)
                {
                    var output = DoCamShift(frame, inputImage);
                    imageBoxProcessed.Image = output.BackProjection;
                }

                if (!_imageBoxSelector.SeedingRectangle.IsEmpty)
                {
                    inputImage.Draw(_imageBoxSelector.SeedingRectangle, new Bgr(Color.Chartreuse));
                }

                imageBoxTracking.Image = inputImage;
            }
        }
Developer: neutmute, Project: PiCamCV, Lines: 25, Source: TrackingControl.cs
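Example 2 converts each grabbed frame once with frame.ToImage&lt;Bgr, byte&gt;() and then draws on the result. A detail worth noting (based on my reading of Emgu CV 3.x, where ToImage copies the pixel data by default): annotations drawn on the converted image do not touch the original Mat, so the raw frame stays usable for other processing. Below is a stripped-down sketch of the same pattern with the form controls removed (camera index 0 and the rectangle are assumptions):

        using System;
        using System.Drawing;
        using Emgu.CV;
        using Emgu.CV.Structure;

        class GrabAndAnnotate
        {
            static void Main()
            {
                Capture capture = new Capture(0); //default camera; adjust as needed
                capture.ImageGrabbed += delegate(object sender, EventArgs e)
                {
                    using (Mat frame = new Mat())
                    {
                        capture.Retrieve(frame);
                        //ToImage produces a typed copy; drawing here leaves 'frame' untouched
                        using (Image<Bgr, byte> annotated = frame.ToImage<Bgr, byte>())
                        {
                            annotated.Draw(new Rectangle(10, 10, 100, 100), new Bgr(Color.Chartreuse), 2);
                            //hand 'annotated' to an ImageBox or save it here
                        }
                    }
                };
                capture.Start();
                Console.ReadLine(); //keep grabbing until Enter is pressed
                capture.Stop();
            }
        }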

Example 3: GetMotionInfo

        private MotionInfo GetMotionInfo(Mat image)
        {
            Mat foregroundMask = new Mat();
            Mat segMask = new Mat();
            MotionInfo motionInfoObj = new MotionInfo();
            double angle;
            double overallangle = 0;
            double motionPixelCount = 0;
            double totalPixelCount = 0;
            double objectCount = 0;
            double minArea = 800;
            int motionArea = 0;

            if (foregroundDetector == null)
            {
                foregroundDetector = new BackgroundSubtractorMOG2();
            }

            foregroundDetector.Apply(image, foregroundMask);

            _motionHistory.Update(foregroundMask);

            ImageForeGroundMaskLast = foregroundMask.ToImage<Bgr, byte>();

            #region get a copy of the motion mask and enhance its color
            double[] minValues, maxValues;
            Point[] minLoc, maxLoc;
            _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
            Mat motionMask = new Mat();
            using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
                CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
            //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
            #endregion

            //create the motion image
            Image<Bgr, Byte> motionImage = new Image<Bgr, byte>(motionMask.Size);
            //display the motion pixels in blue (first channel)
            //motionImage[0] = motionMask;
            CvInvoke.InsertChannel(motionMask, motionImage, 0);

            //Threshold to define a motion area; reduce the value to detect smaller motion
            minArea = 100;
            //storage.Clear(); //clear the storage
            Rectangle[] rects;

            using (VectorOfRect boundingRect = new VectorOfRect())
            {
                _motionHistory.GetMotionComponents(segMask, boundingRect);
                rects = boundingRect.ToArray();
            }

            //iterate through each of the motion components
            foreach (Rectangle comp in rects)
            {
                int area = comp.Width * comp.Height;
                //find the angle and motion pixel count of this specific area
                _motionHistory.MotionInfo(foregroundMask, comp, out angle, out motionPixelCount);
                //reject the components that have a small area
                if (area < minArea) continue;

                overallangle = overallangle + angle;
                totalPixelCount = totalPixelCount + motionPixelCount;
                objectCount = objectCount + 1;
                motionArea = motionArea + area;

                ////Draw each individual motion in red
                //DrawMotion(motionImage, comp, angle, new Bgr(Color.Red));
            }
            motionInfoObj.MotionArea = motionArea;
            motionInfoObj.OverallAngle = overallangle;
            motionInfoObj.BoundingRect = rects;
            motionInfoObj.TotalMotions = rects.Length;
            motionInfoObj.MotionObjects = objectCount;
            motionInfoObj.MotionPixels = totalPixelCount;
            averagetotalPixelCount = 0.75 * averagetotalPixelCount + 0.25 * totalPixelCount;
            if (Math.Abs(averagetotalPixelCount - totalPixelCount) / averagetotalPixelCount > 0.59)
                Console.WriteLine(" GetMotionInfo - Total Motions found: " + rects.Length + "; Motion Pixel count: " + totalPixelCount);
            return motionInfoObj;
        }
Developer: kmacpher67, Project: VVGLookingWindow, Lines: 82, Source: FaceCapture.cs
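Example 3 relies on ToImage for channel conversion as well: foregroundMask.ToImage&lt;Bgr, byte&gt;() turns the single-channel subtractor output into a 3-channel image. A small sketch isolating that behavior (an illustration assuming Emgu CV 3.x; "mask.png" is hypothetical):

        using Emgu.CV;
        using Emgu.CV.CvEnum;
        using Emgu.CV.Structure;

        class MaskToBgr
        {
            static void Main()
            {
                //"mask.png" stands in for a foreground mask saved earlier (hypothetical)
                using (Mat gray = new Mat("mask.png", LoadImageType.Grayscale))
                using (Image<Bgr, byte> bgr = gray.ToImage<Bgr, byte>())
                {
                    //the single gray channel has been replicated into three Bgr channels
                    System.Console.WriteLine("Channels: " + bgr.NumberOfChannels);
                }
            }
        }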

Example 4: ProcessFrame

        private void ProcessFrame(object sender, EventArgs arg)
        {
            //_capture.Retrieve(frame, 0);
            Mat frame = new Mat("C:\\Emgu\\Dump\\ea6b5b28a66c.jpg", LoadImageType.Unchanged);
            Mat grayFrame = new Mat();
            CvInvoke.CvtColor(frame, grayFrame, ColorConversion.Bgr2Gray);
            Mat smallGrayFrame = new Mat();
            CvInvoke.PyrDown(grayFrame, smallGrayFrame);
            Mat smoothedGrayFrame = new Mat();
            CvInvoke.PyrUp(smallGrayFrame, smoothedGrayFrame);

            CvInvoke.Threshold(smoothedGrayFrame, smoothedGrayFrame, 100, 255, ThresholdType.Binary);
            //Image<Gray, Byte> smallGrayFrame = grayFrame.PyrDown();
            //Image<Gray, Byte> smoothedGrayFrame = smallGrayFrame.PyrUp();
            Mat cannyFrame = new Mat();
            CvInvoke.Canny(smoothedGrayFrame, cannyFrame, 100, 60);

            //Image<Gray, Byte> cannyFrame = smoothedGrayFrame.Canny(100, 60);

            VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
            CvInvoke.FindContours(cannyFrame, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
            CvInvoke.DrawContours(frame, contours, 2, new Bgr(Color.Blue).MCvScalar);
            List<RotatedRect> BL = new List<RotatedRect>();
            List<VectorOfPoint> CL = new List<VectorOfPoint>();
            for (int i = 0; i < contours.Size; i++)
            {
                using (VectorOfPoint contour = contours[i])
                using (VectorOfPoint approxContour = new VectorOfPoint())
                {
                    CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                    BL.Add(CvInvoke.MinAreaRect(approxContour));
                    CL.Add(contour);
                }
            }

            if (CL.Count == 0) return; //no contours found; nothing to crop this frame

            VectorOfPoint maxContour = CL[0];
            double maxContourArea = CvInvoke.ContourArea(CL[0], false);
            for (int i = 0; i < CL.Count; i++)
            {
                if (CvInvoke.ContourArea(CL[i], false) > maxContourArea)
                {
                    maxContourArea = CvInvoke.ContourArea(CL[i], false);
                    maxContour = CL[i];
                }
            }

            RotatedRect TMP = CvInvoke.MinAreaRect(maxContour);
            CvInvoke.Polylines(frame, Array.ConvertAll(TMP.GetVertices(), Point.Round), true, new Bgr(Color.Pink).MCvScalar, 2);

            Image<Bgr, Byte> srcImg = frame.ToImage<Bgr, Byte>();
            srcImg.ROI = new Rectangle((int)(TMP.Center.X - 0.5 * TMP.Size.Width), (int)(TMP.Center.Y - 0.5 * TMP.Size.Height), (int)TMP.Size.Width, (int)TMP.Size.Height);
            Image<Bgr, Byte> croppedImg = srcImg.Copy();
            cannyImageBox.Image = croppedImg;
            float[,] tmp = {
                           {0, frame.Height}, //bottom left
                           {0, 0}, //top left
                           {frame.Width, 0}, //top right
                           {frame.Width, frame.Height} //bottom right
                      };
            float[,] target = {
                           {0, (float)0.85 * frame.Height},
                           {0, 0},
                           {(float)0.85 * frame.Width, 0},
                           {(float)0.55 * frame.Width, (float)0.55 * frame.Height}
                      };
            PointF[] tmpPF = new PointF[4];
            PointF[] targetPF = new PointF[4];

            for (int i = 0; i < 4; i++)
            {
                tmpPF[i].X = tmp[i, 0]; tmpPF[i].Y = tmp[i, 1];
                targetPF[i].X = target[i, 0]; targetPF[i].Y = target[i, 1];
            }

            Mat TTT = CvInvoke.GetPerspectiveTransform(tmpPF, targetPF);
            Mat newcroppimg = new Mat();
            CvInvoke.WarpPerspective(croppedImg, newcroppimg, TTT, new System.Drawing.Size(241, 240));

            //CvInvoke.DrawContours(frame, TMP, 2, new Bgr(Color.Red).MCvScalar);

            /*
            foreach (RotatedRect box in BL)
            {
                CvInvoke.Polylines(frame, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.DarkOrange).MCvScalar, 2);
            }*/

            captureImageBox.Image = frame;
            grayscaleImageBox.Image = newcroppimg;
            smoothedGrayscaleImageBox.Image = smoothedGrayFrame;
            //cannyImageBox.Image = cannyFrame;
        }
Developer: Lerbytech, Project: CameraCapture, Lines: 95, Source: CameraCapture.cs
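Example 4 uses a common crop idiom: convert the Mat with ToImage, set the ROI, and Copy() out just that window. A condensed sketch of the idiom on its own (assuming Emgu CV 3.x; the file name and rectangle are hypothetical):

        using System.Drawing;
        using Emgu.CV;
        using Emgu.CV.CvEnum;
        using Emgu.CV.Structure;

        class RoiCrop
        {
            static void Main()
            {
                using (Mat frame = new Mat("input.jpg", LoadImageType.Color)) //hypothetical file
                using (Image<Bgr, byte> src = frame.ToImage<Bgr, byte>())
                {
                    //restrict subsequent operations to the crop window
                    src.ROI = new Rectangle(40, 40, 200, 150);
                    using (Image<Bgr, byte> cropped = src.Copy())
                    {
                        cropped.Save("cropped.jpg"); //Copy() materialized only the ROI
                    }
                    src.ROI = Rectangle.Empty; //reset the ROI when done
                }
            }
        }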


Note: The Mat.ToImage method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.