C# Contour.PushMulti Method Code Examples

This article collects typical usage examples of the C# Contour.PushMulti method. If you are unsure what Contour.PushMulti does in C#, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples of its containing class, Contour.


Nine code examples of the Contour.PushMulti method are shown below, sorted by popularity by default.
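
All of the examples share one pattern: a Contour&lt;Point&gt; is allocated on a MemStorage (which must outlive the contour), and PushMulti pushes an array of points onto one end of the contour's point sequence. As a quick orientation before the real-world examples, here is a minimal, self-contained sketch distilled from them (Emgu CV 2.x legacy API; the square's coordinates are illustrative):

    using System.Drawing;
    using Emgu.CV;

    using (MemStorage storage = new MemStorage())
    {
        // The contour's point data lives inside the MemStorage, so the
        // storage must stay alive for as long as the contour is used.
        Contour<Point> square = new Contour<Point>(storage);

        // BACK_OR_FRONT.FRONT pushes at the head of the sequence,
        // BACK appends at the tail (mirroring cvSeqPushMulti).
        square.PushMulti(
            new Point[] { new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(0, 10) },
            Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
    }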

Example 1: SignDetector

 public SignDetector(Image<Bgr, Byte> stopSignModel)
 {
     _detector2 = new SURFDetector(500, false);
     using (Image<Gray, Byte> redMask = GetColorPixelMask(stopSignModel))
     {
         try
         {
             _tracker2 = new Features2DTracker<float>(_detector2.DetectFeatures(redMask, null));
         }
          catch { } // swallow failures: _tracker2 stays null if the model yields no usable features
     }
     _octagonStorage2 = new MemStorage();
     _octagon2 = new Contour<Point>(_octagonStorage2);
     _octagon2.PushMulti(new Point[] {
         //hexagon
         new Point(1, 0),
         new Point(2, 0),
         new Point(3, 1),
         new Point(2, 2),
         new Point(1, 2),
         new Point(0, 1)},
         //octagon
         //new Point(1, 0),
         //new Point(2, 0),
         //new Point(3, 1),
         //new Point(3, 2),
         //new Point(2, 3),
         //new Point(1, 3),
         //new Point(0, 2),
         //new Point(0, 1)},
         Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
 }
Developer: petrind, Project: SRTesis2, Lines: 32, Source: SignDetector.cs

Example 2: CamShiftTrack

      /// <summary>
      /// Use camshift to track the feature
      /// </summary>
      /// <param name="observedFeatures">The feature found from the observed image</param>
      /// <param name="initRegion">The predicted location of the model in the observed image. If not known, use MCvBox2D.Empty as default</param>
      /// <param name="priorMask">The mask that should be the same size as the observed image. Contains a priori value of the probability a match can be found. If you are not sure, pass an image fills with 1.0s</param>
      /// <returns>If a match is found, the homography projection matrix is returned. Otherwise null is returned</returns>
      public HomographyMatrix CamShiftTrack(SURFFeature[] observedFeatures, MCvBox2D initRegion, Image<Gray, Single> priorMask)
      {
         using (Image<Gray, Single> matchMask = new Image<Gray, Single>(priorMask.Size))
         {
            #region get the list of matched points on the observed image
            Single[, ,] matchMaskData = matchMask.Data;

            //Compute the matched features
            MatchedSURFFeature[] matchedFeature = _matcher.MatchFeature(observedFeatures, 2, 20);
            matchedFeature = VoteForUniqueness(matchedFeature, 0.8);

            foreach (MatchedSURFFeature f in matchedFeature)
            {
               PointF p = f.ObservedFeature.Point.pt;
               matchMaskData[(int)p.Y, (int)p.X, 0] = 1.0f / (float) f.SimilarFeatures[0].Distance;
            }
            #endregion

            Rectangle startRegion;
            if (initRegion.Equals(MCvBox2D.Empty))
               startRegion = matchMask.ROI;
            else
            {
               startRegion = PointCollection.BoundingRectangle(initRegion.GetVertices());
               if (startRegion.IntersectsWith(matchMask.ROI))
                  startRegion.Intersect(matchMask.ROI);
            }

            CvInvoke.cvMul(matchMask.Ptr, priorMask.Ptr, matchMask.Ptr, 1.0);

            MCvConnectedComp comp;
            MCvBox2D currentRegion;
            //Updates the current location
            CvInvoke.cvCamShift(matchMask.Ptr, startRegion, new MCvTermCriteria(10, 1.0e-8), out comp, out currentRegion);

            #region find the SURF features that belong to the current region
            MatchedSURFFeature[] featuresInCurrentRegion;
            using (MemStorage stor = new MemStorage())
            {
               Contour<System.Drawing.PointF> contour = new Contour<PointF>(stor);
               contour.PushMulti(currentRegion.GetVertices(), Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);

               CvInvoke.cvBoundingRect(contour.Ptr, 1); //this is required before calling the InContour function

               featuresInCurrentRegion = Array.FindAll(matchedFeature,
                  delegate(MatchedSURFFeature f)
                  { return contour.InContour(f.ObservedFeature.Point.pt) >= 0; });
            }
            #endregion

            return GetHomographyMatrixFromMatchedFeatures(VoteForSizeAndOrientation(featuresInCurrentRegion, 1.5, 20));
         }
      }
Developer: Rustemt, Project: emgu_openCV, Lines: 60, Source: SURFTracker.cs
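
A hedged usage sketch for the method above, following its own XML docs: pass MCvBox2D.Empty when the initial region is unknown, and a prior mask filled with 1.0s when no prior knowledge is available. The tracker, observedImage, and observedFeatures names are illustrative assumptions standing in for the surrounding SURFTracker pipeline:

    // Hypothetical call site; not taken from the source project.
    using (Image<Gray, Single> priorMask = new Image<Gray, Single>(observedImage.Size))
    {
        priorMask.SetValue(new Gray(1.0)); // uniform prior: no location knowledge
        HomographyMatrix homography =
            tracker.CamShiftTrack(observedFeatures, MCvBox2D.Empty, priorMask);
        if (homography != null)
        {
            // a match was found; the homography projects the model onto the observed image
        }
    }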

Example 3: PointDetector

 public PointDetector()
 {
     joinContourStorage = new MemStorage();
     joinContour = new Contour<Point>(joinContourStorage);
     imageSelector = new Image<Gray, byte>("C:\\monitor_photo_tengah_Repaired_Selected.jpg").Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
     _rectStorage = new MemStorage();
     rect = new Contour<Point>(_rectStorage);
     rect.PushMulti(new Point[] { 
         //rect
         new Point(0, 0),
         new Point(20, 0),
         new Point(20, 20),
         new Point(0, 20)},
        Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
 }
Developer: petrind, Project: SRTesis2, Lines: 15, Source: PointDetector.cs

Example 4: StopSignDetector

 public StopSignDetector()
 {
     _surfParam = new MCvSURFParams(500, false);
     using (Image<Bgr, Byte> stopSignModel = new Image<Bgr, Byte>("stop-sign-model.png"))
     using (Image<Gray, Byte> redMask = GetRedPixelMask(stopSignModel))
     {
         _tracker = new SURFTracker(redMask.ExtractSURF(ref _surfParam));
     }
     _octagonStorage = new MemStorage();
     _octagon = new Contour<Point>(_octagonStorage);
     _octagon.PushMulti(new Point[] {
         new Point(1, 0),
         new Point(2, 0),
         new Point(3, 1),
         new Point(3, 2),
         new Point(2, 3),
         new Point(1, 3),
         new Point(0, 2),
         new Point(0, 1)},
         Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
 }
Developer: samuto, Project: UnityOpenCV, Lines: 21, Source: StopSignDetector.cs

Example 5: ImageDetector

        public ImageDetector(Image<Gray, Byte> imgModel)
        {
            _detector = new SURFDetector(500, false);
            ImageFeature<float>[] features = _detector.DetectFeatures(imgModel, null);
            if (features.Length == 0)
                throw new Exception("No image feature has been found in the image model");
            _tracker = new Features2DTracker<float>(features);

            _octagonStorage = new MemStorage();
            _octagon = new Contour<Point>(_octagonStorage);
            _octagon.PushMulti(new Point[] {
            new Point(1, 0),
            new Point(2, 0),
            new Point(3, 1),
            new Point(3, 2),
            new Point(2, 3),
            new Point(1, 3),
            new Point(0, 2),
            new Point(0, 1)},
               Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
        }
Developer: etp-work, Project: ThePower, Lines: 21, Source: ImageDetector.cs

Example 6: StopSignDetector

 public StopSignDetector(Image<Bgr, Byte> stopSignModel)
 {
     _detector = new SURFDetector(500, false);
     using (Image<Gray, Byte> redMask = GetRedPixelMask(stopSignModel))
     {
         ImageFeature<float>[] temp = _detector.DetectFeatures(redMask, null);
         _tracker = new Features2DTracker<float>(temp);
     }
     _octagonStorage = new MemStorage();
     _octagon = new Contour<Point>(_octagonStorage);
     _octagon.PushMulti(new Point[] {
         new Point(1, 0),
         new Point(2, 0),
         new Point(3, 1),
         new Point(3, 2),
         new Point(2, 3),
         new Point(1, 3),
         new Point(0, 2),
         new Point(0, 1)},
         Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT
     );
 }
Developer: quadrowin, Project: afkgamer, Lines: 22, Source: StopSignDetector.cs

Example 7: CreateContour

 private void CreateContour()
 {
     Point[] octagon = new Point[] { new Point(1, 0), new Point(2, 0), new Point(3, 1), new Point(3, 2), new Point(2, 3), new Point(1, 3), new Point(0, 2), new Point(0, 1) };
     octagonStorage = new MemStorage();
     octagonContour = new Contour<Point>(octagonStorage);
     octagonContour.PushMulti(octagon, Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
 }
Developer: abraxas4, Project: AR-Drone-Project, Lines: 7, Source: SignDetector.cs
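
Examples 1, 4, 5, 6, and 7 all build the same tiny unit-scale polygon as a shape model. A plausible way such a model is consumed (as in Emgu CV's traffic-sign samples) is the legacy cvMatchShapes comparison, which is based on Hu moments and therefore scale-invariant; candidateContour and the 0.1 threshold below are illustrative assumptions:

    // Hypothetical: compare a contour found in the scene against the model.
    // Hu-moment matching is scale-invariant, which is why a 3x3 octagon
    // is a sufficient model for a full-size stop sign.
    double dissimilarity = CvInvoke.cvMatchShapes(
        candidateContour.Ptr, octagonContour.Ptr,
        Emgu.CV.CvEnum.CONTOURS_MATCH_TYPE.CV_CONTOURS_MATCH_I3, 0);
    if (dissimilarity < 0.1) // illustrative threshold
    {
        // the candidate is roughly octagonal; treat it as a possible stop sign
    }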

Example 8: doImageProcessing

        private void doImageProcessing()
        {
            // Translate our most recent color coordinates - done before the background worker
            // as we can't access the sensor inside another thread

            // Clear the green screen
            Array.Clear(_greenScreenPixelData, 0, _greenScreenPixelData.Length);
            // Emgu CV Image
            using (Image<Emgu.CV.Structure.Gray, byte> emguOriginal = new Image<Emgu.CV.Structure.Gray, byte>(640, 480))
            {
                byte[, ,] emguData = emguOriginal.Data;

                // We have to iterate the whole depth image
                for (int y = 0; y < _depthStreamFrameHeight; ++y)
                {
                    for (int x = 0; x < _depthStreamFrameWidth; ++x)
                    {
                        // calculate index into depth array
                        int depthIndex = x + (y * _sensorRef.DepthStream.FrameWidth);

                        DepthImagePixel depthPixel = _depthPixels[depthIndex];

                        // retrieve the depth to color mapping for the current depth pixel
                        ColorImagePoint colorImagePoint = _colorCoordinates[depthIndex];

                        // scale color coordinates to depth resolution
                        int colorInDepthX = colorImagePoint.X;
                        int colorInDepthY = colorImagePoint.Y;

                        // make sure the depth pixel maps to a valid point in color space
                        // check y >= 0 and y < depthHeight to make sure we don't write outside of the array
                        // check x > 0 instead of >= 0 since to fill gaps we set opaque current pixel plus the one to the left
                        // because of how the sensor works it is more correct to do it this way than to set to the right
                        if (colorInDepthX > 0 && colorInDepthX < _depthStreamFrameWidth && colorInDepthY >= 0 && colorInDepthY < _depthStreamFrameHeight)
                        {
                            // calculate index into the green screen pixel array
                            int greenScreenIndex = colorInDepthX + (colorInDepthY * _depthStreamFrameWidth);

                            // Emgu CV needs a binary (black-and-white only) image.
                            if (depthPixel.Depth < _depthThreshold && depthPixel.Depth != 0)
                            {
                                // set opaque
                                _greenScreenPixelData[greenScreenIndex] = opaquePixelValue;

                                // compensate for depth/color not corresponding exactly by setting the pixel 
                                // to the left to opaque as well
                                _greenScreenPixelData[greenScreenIndex - 1] = opaquePixelValue;

                                // Emgu needs an all black image with pure white where the depth data is
                                emguData[colorInDepthY, colorInDepthX, 0] = 255;

                                // set the pixel before this one white too. We don't need this in blob detection, as the blobs will fill in;
                                // it just ends up adding extra on all the left edges
                                /*
                                if (colorInDepthX - 1 > -1)
                                {
                                    emguData[colorInDepthY, colorInDepthX - 1, 0] = 255;
                                }
                                */
                            }
                        }
                    }
                }

                // Emgu CV work: run blob detection on the binary image
                Emgu.CV.Cvb.CvBlobs resultingBlobs = new Emgu.CV.Cvb.CvBlobs();
                Emgu.CV.Cvb.CvBlobDetector bDetect = new Emgu.CV.Cvb.CvBlobDetector();
                uint numLabeledPixels = bDetect.Detect(emguOriginal, resultingBlobs);

                Image<Emgu.CV.Structure.Bgra, double> blobImg = new Image<Emgu.CV.Structure.Bgra, double>(emguOriginal.Width, emguOriginal.Height, new Emgu.CV.Structure.Bgra(0, 0, 0, 0));
                foreach (Emgu.CV.Cvb.CvBlob targetBlob in resultingBlobs.Values)
                {
                    using (MemStorage mem_BlobContours = new MemStorage())
                    {
                        Contour<System.Drawing.Point> allContourPointsInBlob = targetBlob.GetContour(mem_BlobContours);

                        // If there are more than five points, smooth them
                        if (allContourPointsInBlob.Total > 5)
                        {
                            System.Drawing.Point[] originalPoints = allContourPointsInBlob.ToArray();
                            System.Drawing.Point[] smoothedPoints = EmguUtilities.getSmoothedContour(originalPoints, 6, (float)0.5, Properties.Settings.Default.kinectGreenScreenMaskXPixelShift);

                            //------------- FILL -----------------------------------
                            // Push the smoothed points back into a contour collection
                            MemStorage finalFillStorage = new MemStorage();
                            Contour<System.Drawing.Point> finalFillContours = new Contour<System.Drawing.Point>(finalFillStorage);
                            finalFillContours.PushMulti(smoothedPoints, Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);
                            blobImg.Draw(finalFillContours, black, -1);
                            // ------------ END FILL ------------------------------
                        }
                    }
                }

                // Convert the Emgu CV image to a BitmapSource
                BitmapSource finalRef = EmguUtilities.ToBitmapSource(blobImg);
                finalRef.Freeze();
                // Ensure the greenScreenMask is locked before doing this
//......... part of the code omitted here .........
Developer: guozanhua, Project: MFDetroit2013_Kinect_GreenScreen_PhotoKiosk, Lines: 101, Source: GreenScreenImplementation.cs
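
The fill step in Example 8 hinges on Image.Draw treating a negative thickness as "fill the interior". Condensed to just that step (smoothedPoints, blobImg, and black are the names from the example; unlike the original, this sketch disposes the MemStorage deterministically):

    // Push the smoothed points into a fresh contour and draw it filled.
    using (MemStorage fillStorage = new MemStorage())
    {
        Contour<System.Drawing.Point> fillContour = new Contour<System.Drawing.Point>(fillStorage);
        fillContour.PushMulti(smoothedPoints, Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);
        blobImg.Draw(fillContour, black, -1); // thickness of -1 fills the contour
    }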

Example 9: TestContour2

        public void TestContour2()
        {
            Image<Bgr, Byte> img1 = new Image<Bgr, byte>(200, 200);
            Image<Bgr, Byte> img2 = new Image<Bgr, byte>(200, 200);
            using (MemStorage stor = new MemStorage())
            {
                Point[] polyline = new Point[] {
                    new Point(20, 20),
                    new Point(20, 30),
                    new Point(30, 30),
                    new Point(30, 20)};

                Contour<Point> c = new Contour<Point>(stor);
                c.PushMulti(polyline, Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);

                img1.Draw(c, new Bgr(255, 0, 0), new Bgr(), 0, -1, new Point(0, 0));
                img1.Draw(c, new Bgr(0, 255, 0), new Bgr(), 0, -1, new Point(20, 10));
                img1.Draw(c, new Bgr(0, 0, 255), new Bgr(), 0, 1, new Point(20, 10));

                /*
                for (int i = 0; i < polyline.Length; i++)
                {
                    polyline[i].X += 20;
                    polyline[i].Y += 10;
                }
                img1.DrawPolyline(polyline, true, new Bgr(0, 0, 255), 1);
                */
            }
        }
Developer: samuto, Project: UnityOpenCV, Lines: 29, Source: AutoTestImage.cs


Note: The Contour.PushMulti method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.