

C# Mat.SetTo Method Code Examples

This article collects typical usage examples of the Mat.SetTo method in C#. If you have been wondering what Mat.SetTo does, how to call it, or what real-world uses look like, the hand-picked examples here should help. You can also browse more usage examples of the Mat class that this method belongs to.


Fifteen code examples of the Mat.SetTo method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
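Before the full examples, here is a minimal standalone sketch of the two SetTo forms that recur below: filling an entire Mat with a scalar, and overwriting only the pixels selected by a mask. The calls are standard Emgu CV; the sizes, colors, and variable names are illustrative assumptions, not taken from any example.

using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

// A minimal sketch, assuming the Emgu CV package is referenced.
// 1) Fill an entire matrix with a constant color.
Mat img = new Mat(240, 320, DepthType.Cv8U, 3);    // 240 rows x 320 cols, 8-bit, 3 channels
img.SetTo(new MCvScalar(255, 255, 255));           // every pixel becomes white

// 2) Overwrite only the pixels selected by a non-zero mask.
Mat mask = new Mat(img.Size, DepthType.Cv8U, 1);   // single-channel 8-bit mask
mask.SetTo(new MCvScalar(0));                      // nothing selected yet
CvInvoke.Rectangle(mask,
    new System.Drawing.Rectangle(10, 10, 100, 50),
    new MCvScalar(255), -1);                       // select a filled rectangular region
img.SetTo(new MCvScalar(0, 0, 255), mask);         // paint just that region red (BGR)

The same mask pattern appears throughout the feature-matching examples below: a mask filled with 255 marks every candidate match, and later voting steps zero out the rejected ones.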

Example 1: FindMatch

      public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
      {
         int k = 2;
         double uniquenessThreshold = 0.80;

         Stopwatch watch;
         homography = null;

         modelKeyPoints = new VectorOfKeyPoint();
         observedKeyPoints = new VectorOfKeyPoint();

         using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
         using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
         {
            KAZE featureDetector = new KAZE();

            //extract features from the object image
            Mat modelDescriptors = new Mat();
            featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            Mat observedDescriptors = new Mat();
            featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            
            matcher.Add(modelDescriptors);

            matcher.KnnMatch(observedDescriptors, matches, k, null);
            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);
            if (nonZeroCount >= 4)
            {
               nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                  matches, mask, 1.5, 20);
               if (nonZeroCount >= 4)
                  homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                     observedKeyPoints, matches, mask, 2);
            }

            watch.Stop();

         }
         matchTime = watch.ElapsedMilliseconds;
      }
Developer: neutmute, Project: emgucv, Lines: 49, Source: DrawMatches.cs

Example 2: FilterTiles

        public void FilterTiles(Mat image, Mat modifiedMat)
        {
            CvInvoke.Imshow("0", image);
            
            Stopwatch sw1 = new Stopwatch();
            sw1.Start();
            Mat laplaced = new Mat();
            CvInvoke.CvtColor(image, laplaced, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
            Mat greyResult = laplaced.Clone();
            Mat greySource = laplaced.Clone();

            Mat cannySrc = new Mat();

            //if not half inch, do canny and subtract to separate tiles better. Basically "sharpens" the edge
            if (scan.TileSettings.CannyEdges)
            {
                //create canny image, these parameters could be adjusted probably?
                CvInvoke.Canny(greySource, greyResult, 50, 150);
                //dilate canny                

                CvInvoke.Dilate(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
                CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
                
                CvInvoke.Imshow("1a", greyResult);

                //subtract dilated canny from source to get separation
                CvInvoke.Subtract(greySource, greyResult, greyResult);
                greySource = greyResult.Clone();
                CvInvoke.Imshow("1b", greyResult);
            }

            if (scan.TileSettings.ThresholdEdges)
            {
                Mat edges = new Mat();
                CvInvoke.Threshold(greyResult, edges, (float)thresholdTrackbar.Value, 0, ThresholdType.ToZero);
                CvInvoke.Subtract(greySource, edges, greyResult);
                CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), 2, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
                CvInvoke.Imshow("pres-1c", greyResult);
            }
            //perform distance transform
            CvInvoke.DistanceTransform(greyResult, greyResult, null, DistType.L2, 5);
            //normalize the image to bring out the peaks
            CvInvoke.Normalize(greyResult, greyResult, 0, 1, NormType.MinMax);
            CvInvoke.Imshow("2", greyResult);

            //threshold the image, different thresholds for different tiles

            CvInvoke.Threshold(greyResult, greyResult, scan.TileSettings.ThresholdVal, 1, ThresholdType.Binary);

            CvInvoke.Imshow("3", greyResult);

            //erode to split the blobs
            CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(-1, -1), scan.TileSettings.ThresholdErode, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);

            //convert to 8 bit unsigned needed for canny
            greyResult.ConvertTo(greyResult, DepthType.Cv8U);

            VectorOfVectorOfPoint markers = new VectorOfVectorOfPoint();

            //create 32bit, single channel image for result of markers
            Mat markerImage = new Mat(greyResult.Size, DepthType.Cv32S, 1);

            //set image to 0
            markerImage.SetTo(new MCvScalar(0, 0, 0));

            //find the contours
            CvInvoke.FindContours(greyResult, markers, null, RetrType.External, ChainApproxMethod.LinkRuns);

            //label the markers from 1 -> n, the rest of the image should remain 0
            for (int i = 0; i < markers.Size; i++)
                CvInvoke.DrawContours(markerImage, markers, i, new MCvScalar(i + 1, i + 1, i + 1), -1);

            ScalarArray mult = new ScalarArray(5000);
            Mat markerVisual = new Mat();

            CvInvoke.Multiply(markerImage, mult, markerVisual);

            CvInvoke.Imshow("4", markerVisual);

            //draw the background marker
            CvInvoke.Circle(markerImage,
                new System.Drawing.Point(5, 5),
                3,
                new MCvScalar(255, 255, 255),
                -1);

            //convert to 3 channel
            Mat convertedOriginal = new Mat();
            
            //use canny modified if 3/4", or use the gray image for others

            CvInvoke.CvtColor(greySource, convertedOriginal, ColorConversion.Gray2Bgr);

            //watershed!!
            CvInvoke.Watershed(convertedOriginal, markerImage);
            //visualize
            CvInvoke.Multiply(markerImage, mult, markerVisual);
            CvInvoke.Imshow("5", markerVisual);

            //get contours to get the actual tiles now that they are separate...
//.........remaining code omitted.........
Developer: alecrudd, Project: GocatorImager, Lines: 101, Source: Form1.cs

Example 3: FindMatch

      public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
      {
         int k = 2;
         double uniquenessThreshold = 0.8;
         double hessianThresh = 300;
         
         Stopwatch watch;
         homography = null;

         modelKeyPoints = new VectorOfKeyPoint();
         observedKeyPoints = new VectorOfKeyPoint();

         #if !__IOS__
         if (CudaInvoke.HasCuda)
         {
            CudaSURF surfCuda = new CudaSURF((float) hessianThresh);
            using (GpuMat gpuModelImage = new GpuMat(modelImage))
            //extract features from the object image
            using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
            using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
            {
               surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
               watch = Stopwatch.StartNew();

               // extract features from the observed image
               using (GpuMat gpuObservedImage = new GpuMat(observedImage))
               using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
               using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
               //using (GpuMat tmp = new GpuMat())
               //using (Stream stream = new Stream())
               {
                  matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                  surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                  mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                  mask.SetTo(new MCvScalar(255));
                  Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                  int nonZeroCount = CvInvoke.CountNonZero(mask);
                  if (nonZeroCount >= 4)
                  {
                     nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                        matches, mask, 1.5, 20);
                     if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                           observedKeyPoints, matches, mask, 2);
                  }
               }
               watch.Stop();
            }
         }
         else
         #endif
         {
            using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
            using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
            {
               SURF surfCPU = new SURF(hessianThresh);
               //extract features from the object image
               UMat modelDescriptors = new UMat();
               surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

               watch = Stopwatch.StartNew();

               // extract features from the observed image
               UMat observedDescriptors = new UMat();
               surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
               BFMatcher matcher = new BFMatcher(DistanceType.L2);
               matcher.Add(modelDescriptors);

               matcher.KnnMatch(observedDescriptors, matches, k, null);
               mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
               mask.SetTo(new MCvScalar(255));
               Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

               int nonZeroCount = CvInvoke.CountNonZero(mask);
               if (nonZeroCount >= 4)
               {
                  nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                     matches, mask, 1.5, 20);
                  if (nonZeroCount >= 4)
                     homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                        observedKeyPoints, matches, mask, 2);
               }

               watch.Stop();
            }
         }
         matchTime = watch.ElapsedMilliseconds;
      }
Developer: Delaley, Project: emgucv, Lines: 92, Source: DrawMatches.cs

Example 4: Start

   // Use this for initialization
   void Start()
   {      
      Mat img = new Mat(new Size(640, 240), DepthType.Cv8U, 3);
      img.SetTo(new MCvScalar());
      String openclStr = "None";
      if (CvInvoke.HaveOpenCL)
      {
         //StringBuilder builder = new StringBuilder();
         using (VectorOfOclPlatformInfo oclPlatformInfos = OclInvoke.GetPlatformsInfo())
         {
            if (oclPlatformInfos.Size > 0)
            {
               PlatformInfo platformInfo = oclPlatformInfos[0];
               openclStr = platformInfo.ToString();
            }
         }
      }

      CvInvoke.PutText(img, String.Format("Emgu CV for Unity {0}", Emgu.Util.Platform.OperationSystem), new System.Drawing.Point(10, 60), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                       1.0, new MCvScalar(0, 255, 0));

      CvInvoke.PutText(img, String.Format("OpenCL: {0}",openclStr), new System.Drawing.Point(10, 120), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                       1.0, new MCvScalar(0, 0, 255));

      Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

      this.GetComponent<GUITexture>().texture = texture;
      this.GetComponent<GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
      
   }
Developer: neutmute, Project: emgucv, Lines: 31, Source: HelloTexture.cs

Example 5: TryFindHomography

        private Mat TryFindHomography(VectorOfKeyPoint modelKeyPoints, VectorOfKeyPoint observedKeyPoints,
            VectorOfVectorOfDMatch matches)
        {
            var mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));

            try
            {
                Features2DToolbox.VoteForUniqueness(matches, threshold, mask);

                var nonZeroCount = CvInvoke.CountNonZero(mask);

                if (nonZeroCount < 4)
                {
                    return null;
                }

                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                    matches, mask, 1.5, 20);

                if (nonZeroCount >= 4)
                {
                    return Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                        observedKeyPoints, matches, mask, 2);
                }
            }
            catch (Exception)
            {
                return null;
            }
            return null;
        }
Developer: rachwal, Project: RTM-Tools, Lines: 32, Source: CudaHomographyCalculator.cs

Example 6: TestMatEquals

      public void TestMatEquals()
      {
         Mat m1 = new Mat(640, 320, DepthType.Cv8U, 3);
         m1.SetTo(new MCvScalar(1, 2, 3));
         Mat m2 = new Mat(640, 320, DepthType.Cv8U, 3);
         m2.SetTo(new MCvScalar(1, 2, 3));
         
         EmguAssert.IsTrue(m1.Equals(m2));

      }
Developer: reidblomquist, Project: emgucv, Lines: 10, Source: AutoTestMat.cs

Example 7: FilterPlate

        private static UMat FilterPlate(UMat plate)
        {
            UMat thresh = new UMat();
            CvInvoke.Threshold(plate, thresh, 120, 255, ThresholdType.BinaryInv);
            //Image<Gray, Byte> thresh = plate.ThresholdBinaryInv(new Gray(120), new Gray(255));

            Size plateSize = plate.Size;
            using (Mat plateMask = new Mat(plateSize.Height, plateSize.Width, DepthType.Cv8U, 1))
            using (Mat plateCanny = new Mat())
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                plateMask.SetTo(new MCvScalar(255.0));
                CvInvoke.Canny(plate, plateCanny, 100, 50);
                CvInvoke.FindContours(plateCanny, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);

                int count = contours.Size;
                for (int i = 1; i < count; i++)
                {
                    using (VectorOfPoint contour = contours[i])
                    {

                        Rectangle rect = CvInvoke.BoundingRectangle(contour);
                        if (rect.Height > (plateSize.Height >> 1))
                        {
                            rect.X -= 1;
                            rect.Y -= 1;
                            rect.Width += 2;
                            rect.Height += 2;
                            Rectangle roi = new Rectangle(Point.Empty, plate.Size);
                            rect.Intersect(roi);
                            CvInvoke.Rectangle(plateMask, rect, new MCvScalar(), -1);
                            //plateMask.Draw(rect, new Gray(0.0), -1);
                        }
                    }

                }

                thresh.SetTo(new MCvScalar(), plateMask);
            }

            CvInvoke.Erode(thresh, thresh, null, new Point(-1, -1), 1, BorderType.Constant,
                CvInvoke.MorphologyDefaultBorderValue);
            CvInvoke.Dilate(thresh, thresh, null, new Point(-1, -1), 1, BorderType.Constant,
                CvInvoke.MorphologyDefaultBorderValue);

            return thresh;
        }
Developer: Neths, Project: ReStudio, Lines: 47, Source: Form1.cs

Example 8: ApplyFilter

    public void ApplyFilter(Mat src)
    {
        CvInvoke.CvtColor(src, src, ColorConversion.Bgr2Hsv);

        Mat threshold = new Mat(src.Height, src.Width, src.Depth, src.NumberOfChannels);
        MCvScalar min = new MCvScalar(m_hmin, m_smin, m_vmin);
        MCvScalar max = new MCvScalar(m_hmax, m_smax, m_vmax);

        CvInvoke.InRange(src, new ScalarArray(min), new ScalarArray(max), threshold);

        Mat element = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3,3), Point.Empty);
        CvInvoke.Erode(threshold, threshold, element, Point.Empty, 1, BorderType.Constant, new MCvScalar(1.0f));
        CvInvoke.Canny(threshold, threshold, 100, 255);

        VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
        Mat hierarchy = new Mat();

        CvInvoke.FindContours(threshold, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple, Point.Empty);

        Mat draw = new Mat(src.Height, src.Width, src.Depth, 1);
        draw.SetTo(new MCvScalar(0.0));
        int i = 0;

        //Debug.Log("CONTOURS");

        var contoursArray = contours.ToArrayOfArray();
        foreach(Point[] contour in contoursArray)
        {
            CvInvoke.DrawContours(draw, contours, i, new MCvScalar(255.0), 1, LineType.EightConnected, null, int.MaxValue, Point.Empty);

            double a = CvInvoke.ContourArea(new VectorOfPoint(contour));
            //Debug.Log("Contour: " + a);
            i++;
        }

        //Emgu.CV.UI.ImageViewer.Show(draw, "test");

        if(m_onFrame != null) m_onFrame.Invoke(draw);
    }
Developer: AndyGates, Project: TrackMyBalls, Lines: 39, Source: ThresholdFilter.cs

Example 9: TestMinEnclosingCircle

      public void TestMinEnclosingCircle()
      {
         #region generate random points
         System.Random r = new Random();
         int sampleCount = 100;
         Ellipse modelEllipse = new Ellipse(new PointF(200, 200), new SizeF(90, 60), -60);
         PointF[] pts = PointCollection.GeneratePointCloud(modelEllipse, sampleCount);
         #endregion

         Stopwatch watch = Stopwatch.StartNew();
         CircleF circle = CvInvoke.MinEnclosingCircle(pts);
         watch.Stop();

         #region draw the points and the circle
         Mat img = new Mat(400, 400, DepthType.Cv8U, 3);
         img.SetTo(new MCvScalar(255, 255, 255));
         foreach (PointF p in pts)
            CvInvoke.Circle(img, Point.Round(p), 2, new MCvScalar(0, 255, 0), 1);
         #endregion

         //Emgu.CV.UI.ImageViewer.Show(img, String.Format("Time used: {0} milliseconds", watch.ElapsedMilliseconds));
      }
Developer: Delaley, Project: emgucv, Lines: 22, Source: AutoTestVarious.cs

Example 10: TestMinAreaRect

      public void TestMinAreaRect()
      {
         #region generate random points
         System.Random r = new Random();
         int sampleCount = 100;
         Ellipse modelEllipse = new Ellipse(new PointF(200, 200), new SizeF(90, 60), -60);
         PointF[] pts = PointCollection.GeneratePointCloud(modelEllipse, sampleCount);
         #endregion

         Stopwatch watch = Stopwatch.StartNew();
         RotatedRect box = CvInvoke.MinAreaRect(pts);
         watch.Stop();

         #region draw the points and the box
         Mat img = new Mat(400, 400, DepthType.Cv8U, 3);
         img.SetTo(new MCvScalar(255, 255, 255));
#if NETFX_CORE
         Point[] vertices = Extensions.ConvertAll(box.GetVertices(), Point.Round);
#else
         Point[] vertices = Array.ConvertAll(box.GetVertices(), Point.Round);
#endif
         
         CvInvoke.Polylines(img, vertices, true, new MCvScalar(0, 0, 255), 1);
         foreach (PointF p in pts)
            CvInvoke.Circle(img, Point.Round(p), 2, new MCvScalar(0, 255, 0), 1);
         #endregion

         //Emgu.CV.UI.ImageViewer.Show(img, String.Format("Time used: {0} milliseconds", watch.ElapsedMilliseconds));
      }
Developer: Delaley, Project: emgucv, Lines: 29, Source: AutoTestVarious.cs

Example 11: TestEllipseFitting

      public void TestEllipseFitting()
      {
         #region generate random points
         System.Random r = new Random();
         int sampleCount = 100;
         Ellipse modelEllipse = new Ellipse(new PointF(200, 200), new SizeF(150, 60), 90);
         PointF[] pts = PointCollection.GeneratePointCloud(modelEllipse, sampleCount);
         #endregion

         Stopwatch watch = Stopwatch.StartNew();
         Ellipse fittedEllipse = PointCollection.EllipseLeastSquareFitting(pts);
         watch.Stop();

         #region draw the points and the fitted ellipse
         Mat img = new Mat(400, 400, DepthType.Cv8U, 3);
         img.SetTo(new MCvScalar(255, 255, 255));
         foreach (PointF p in pts)
            CvInvoke.Circle(img, Point.Round(p), 2, new MCvScalar(0, 255, 0), 1);
         RotatedRect rect = fittedEllipse.RotatedRect;
         rect.Angle += 90; //the detected ellipse is off by 90 degrees
         CvInvoke.Ellipse(img, rect, new MCvScalar(0, 0, 255), 2);
         #endregion
         
         //Emgu.CV.UI.ImageViewer.Show(img, String.Format("Time used: {0} milliseconds", watch.ElapsedMilliseconds));
      }
Developer: Delaley, Project: emgucv, Lines: 25, Source: AutoTestVarious.cs

Example 12: TestConvexHull

      public void TestConvexHull()
      {
         #region Create some random points
         Random r = new Random();
         PointF[] pts = new PointF[200];
         for (int i = 0; i < pts.Length; i++)
         {
            pts[i] = new PointF((float)(100 + r.NextDouble() * 400), (float)(100 + r.NextDouble() * 400));
         }
         #endregion

         Mat img = new Mat(600, 600, DepthType.Cv8U, 3);
         img.SetTo(new MCvScalar(255.0, 255.0, 255.0));
         //Draw the points 
         foreach (PointF p in pts)
            CvInvoke.Circle(img, Point.Round(p), 3, new MCvScalar(0.0, 0.0, 0.0));

         //Find and draw the convex hull

         Stopwatch watch = Stopwatch.StartNew();
         PointF[] hull = CvInvoke.ConvexHull(pts, true);
         watch.Stop();
         CvInvoke.Polylines(
            img,
#if NETFX_CORE
            Extensions.ConvertAll<PointF, Point>(hull, Point.Round),
#else
            Array.ConvertAll<PointF, Point>(hull, Point.Round),
#endif
            true, new MCvScalar(255.0, 0.0, 0.0));

         //Emgu.CV.UI.ImageViewer.Show(img, String.Format("Convex Hull Computed in {0} milliseconds", watch.ElapsedMilliseconds));

      }
Developer: Delaley, Project: emgucv, Lines: 34, Source: AutoTestVarious.cs

Example 13: TestDenseHistogram2

      public void TestDenseHistogram2()
      {
         Mat img = new Mat(400, 400, DepthType.Cv8U, 3);
         CvInvoke.Randu(img, new MCvScalar(), new MCvScalar(255,255,255));
         Mat hist = new Mat();
         using (VectorOfMat vms = new VectorOfMat(img))
         {
            CvInvoke.CalcHist(vms, new int[] {0, 1, 2}, null, hist, new int[] {20, 20, 20},
               new float[] {0, 255, 0, 255, 0, 255}, true);
            byte[] bytes = hist.GetData();
            hist.SetTo(bytes);

            float[] bins = new float[20*20*20];
            hist.CopyTo(bins);
         }
      }
Developer: Delaley, Project: emgucv, Lines: 16, Source: AutoTestVarious.cs

Example 14: ProcessFrame

      private void ProcessFrame(object sender, EventArgs e)
      {
         Mat image = new Mat();

         _capture.Retrieve(image);
         if (_forgroundDetector == null)
         {
            _forgroundDetector = new BackgroundSubtractorMOG2();
         }

         _forgroundDetector.Apply(image, _forgroundMask);

         //update the motion history
         _motionHistory.Update(_forgroundMask);         

         #region get a copy of the motion mask and enhance its color
         double[] minValues, maxValues;
         Point[] minLoc, maxLoc;
         _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
         Mat motionMask = new Mat();
         using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
            CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
         //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
         #endregion

         //create the motion image 
         Mat motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
         motionImage.SetTo(new MCvScalar(0));
         //display the motion pixels in blue (first channel)
         //motionImage[0] = motionMask;
         CvInvoke.InsertChannel(motionMask, motionImage, 0);

         //Threshold to define a motion area, reduce the value to detect smaller motion
         double minArea = 100;

         //storage.Clear(); //clear the storage
         Rectangle[] rects;
         using (VectorOfRect boundingRect = new VectorOfRect())
         {
            _motionHistory.GetMotionComponents(_segMask, boundingRect);
            rects = boundingRect.ToArray();
         }

         //iterate through each of the motion components
         foreach (Rectangle comp in rects)
         {
            int area = comp.Width * comp.Height;
            //reject components whose area is too small
            if (area < minArea) continue;

            // find the angle and motion pixel count of the specific area
            double angle, motionPixelCount;
            _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

            //reject areas that contain too little motion
            if (motionPixelCount < area * 0.05) continue;

            //Draw each individual motion in red
            DrawMotion(motionImage, comp, angle, new Bgr(Color.Red));
         }

         // find and draw the overall motion angle
         double overallAngle, overallMotionPixelCount;

         _motionHistory.MotionInfo(_forgroundMask, new Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);
         DrawMotion(motionImage, new Rectangle(Point.Empty, motionMask.Size), overallAngle, new Bgr(Color.Green));

         if (this.Disposing || this.IsDisposed)
            return;

         capturedImageBox.Image = image;
         forgroundImageBox.Image = _forgroundMask;

         //Display the amount of motions found on the current image
         UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", rects.Length, overallMotionPixelCount));

         //Display the image of the motion
         motionImageBox.Image = motionImage;

      }
Developer: neutmute, Project: emgucv, Lines: 80, Source: Form1.cs

Example 15: Compare2Features

        public Image Compare2Features(
            string destFeatureFile,
            string origFeatureFile,
            string vpFileDest,
            string vpFileOrig,
            string destImageFile = "",
            string origImageFile = "",
            bool needMatchedImage = false)
        {
            EmguType destFeatures = Utils.ReadJsonFile<EmguType>(destFeatureFile);
            EmguType origFeatures = Utils.ReadJsonFile<EmguType>(origFeatureFile);

            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            matcher.Add(origFeatures.Descriptors);
            matcher.KnnMatch(destFeatures.Descriptors, matches, 2, null);
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

            Mat homography = null;
            int Count = CvInvoke.CountNonZero(mask);      //used to locate the template in the image
            if (Count >= 4)
            {
                Count = Features2DToolbox.VoteForSizeAndOrientation(origFeatures.KeyPoints, destFeatures.KeyPoints, matches, mask, 1.5, 20);
                if (Count >= 4)
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(origFeatures.KeyPoints, destFeatures.KeyPoints, matches, mask, 2);
            }

            Mat showImage = null;
            Mat pointImage = null;
            if (needMatchedImage
                && !string.IsNullOrWhiteSpace(destImageFile)
                && !string.IsNullOrWhiteSpace(origImageFile))
            {

                Image<Gray, Byte> destImage = new Image<Gray, Byte>(destImageFile);
                Image<Gray, Byte> origImage = new Image<Gray, Byte>(origImageFile);

                showImage = new Mat(origImage.Size, DepthType.Cv8U, 3);
                pointImage = new Mat(origImage.Size, DepthType.Cv8U, 3);
                //add optical vp line

                string vpPath = Path.GetDirectoryName(vpFileDest);
                List<FileInfo> files = new List<FileInfo>();
                DirectoryInfo dirInfo = new DirectoryInfo(vpPath);
                FileInfo[] fsinfo = dirInfo.GetFiles();
                FileInfo[] vppFiles = fsinfo.Where(p => p.Name.Contains(".jpgpp.dat")).OrderBy(p => p.Name).ToArray();
                //FileInfo[] vpdFiles = fsinfo.Where(p => p.Name.Contains(".jpgpd.dat")).OrderBy(p => p.Name).ToArray();

                for (int k = 0; k < vppFiles.Length - 1; k++)
                {

                    VectorOfPointF vpDest = Utils.ReadJsonFile<VectorOfPointF>(vppFiles[k+1].FullName);
                    //VectorOfPointF vpOrig = Utils.ReadJsonFile<VectorOfPointF>(vpdFiles[k].FullName);
                    VectorOfPointF vpOrig = Utils.ReadJsonFile<VectorOfPointF>(vppFiles[k].FullName);

                    //Motion tracking restarts at each group boundary; skip this pair.
                    if ((k+1) % Constants.DETECTIVE_GROUP_COUNT == 0)
                        continue;

                    Point[] pointsDest = Array.ConvertAll<PointF, Point>(vpDest.ToArray(), Point.Round);
                    Point[] pointsOirg = Array.ConvertAll<PointF, Point>(vpOrig.ToArray(), Point.Round);

                    for (int i = 0; i < pointsDest.Length; i++)
                    {
                        Point[] ps = { pointsDest[i], pointsOirg[i] };
                        CvInvoke.Polylines(pointImage, ps, true, new MCvScalar(0, 0, 255, 255));
                        //CvInvoke.Circle(pointImage, pointsOirg[i], 1, new MCvScalar(0, 255, 0, 255));
                    }
                }

                Image<Bgr, Byte> firstImg = new Image<Bgr, Byte>(origImageFile);
                Image<Bgr, Byte> lastImg = new Image<Bgr, Byte>("D:\\MyPrj\\mygitcode\\MyCode\\ExamVideoProcess\\ExamVideoProcess\\bin\\x64\\Debug\\initVideo\\30Grayimg.jpg");
                CvInvoke.AddWeighted(firstImg, 0.5, lastImg, 0.5, 0.0, showImage, DepthType.Cv8U);
                CvInvoke.AddWeighted(showImage, 0.5, pointImage, 0.5, 0.0, showImage, DepthType.Cv8U);

                /*
                Features2DToolbox.DrawMatches(origImage.Convert<Gray, Byte>().Mat, origFeatures.KeyPoints, destImage.Convert<Gray, Byte>().Mat, destFeatures.KeyPoints, matches, showImage, new MCvScalar(255, 0, 255), new MCvScalar(0, 255, 255), mask);
                if (homography != null)     //if the template was found in the image, draw it
                {
                    Rectangle rect = new Rectangle(Point.Empty, origImage.Size);
                    PointF[] points = new PointF[]
                {
                  new PointF(rect.Left, rect.Bottom),
                  new PointF(rect.Right, rect.Bottom),
                  new PointF(rect.Right, rect.Top),
                  new PointF(rect.Left, rect.Top)
                };
                    points = CvInvoke.PerspectiveTransform(points, homography);
                    Point[] points2 = Array.ConvertAll<PointF, Point>(points, Point.Round);
                    VectorOfPoint vp = new VectorOfPoint(points2);
                    CvInvoke.Polylines(showImage, vp, true, new MCvScalar(255, 0, 0, 255), 15);

                }
                */
                return showImage.Bitmap;
            }
            return null;
        }
Developer: Lionel1204, Project: MyCode, Lines: 100, Source: CompareFeatures.cs


Note: The Mat.SetTo examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please consult each project's License before using or redistributing the code, and do not repost without permission.