

C# Image.ExtractSURF Method Code Examples

This article collects typical usage examples of the C# Image.ExtractSURF method. If you are wondering what Image.ExtractSURF does, how to call it, or what working code looks like, the hand-picked examples below should help. You can also explore other usage examples of the Image class, to which this method belongs.


Seven code examples of the Image.ExtractSURF method are shown below, sorted by popularity by default.
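
All seven examples share the same basic call pattern. As a minimal sketch (this assumes the older Emgu CV 2.x API that these samples target; "box.png" stands in for any input image):

        //SURF detector parameters: hessian threshold 500, extended = false (64-d descriptors)
        MCvSURFParams surfParam = new MCvSURFParams(500, false);
        //load the input as a grayscale image
        Image<Gray, Byte> image = new Image<Gray, byte>("box.png");
        //detect keypoints and compute their descriptors in a single call
        SURFFeature[] features = image.ExtractSURF(ref surfParam);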

Example 1: Run

        static void Run()
        {
            MCvSURFParams surfParam = new MCvSURFParams(500, false);

            Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png");
            //extract features from the object image
            SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref surfParam);

            //Create a SURF Tracker
            SURFTracker tracker = new SURFTracker(modelFeatures);

            Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");

            Stopwatch watch = Stopwatch.StartNew();
            // extract features from the observed image
            SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref surfParam);

            //for each observed feature, find its 2 nearest model features (emax = 20 search steps)
            SURFTracker.MatchedSURFFeature[] matchedFeatures = tracker.MatchFeature(imageFeatures, 2, 20);
            //ratio test: keep a match only if the best distance is below 0.8x the second best
            matchedFeatures = SURFTracker.VoteForUniqueness(matchedFeatures, 0.8);
            //keep matches whose scale and rotation agree (scale increment 1.5, 20 rotation bins)
            matchedFeatures = SURFTracker.VoteForSizeAndOrientation(matchedFeatures, 1.5, 20);
            //estimate the model-to-observed homography (null if it cannot be computed)
            HomographyMatrix homography = SURFTracker.GetHomographyMatrixFromMatchedFeatures(matchedFeatures);
            watch.Stop();

            //Merge the object image and the observed image into one image for display
            Image<Gray, Byte> res = modelImage.ConcateVertical(observedImage);

            #region draw lines between the matched features
            foreach (SURFTracker.MatchedSURFFeature matchedFeature in matchedFeatures)
            {
                PointF p = matchedFeature.ObservedFeature.Point.pt;
                p.Y += modelImage.Height;
                res.Draw(new LineSegment2DF(matchedFeature.SimilarFeatures[0].Feature.Point.pt, p), new Gray(0), 1);
            }
            #endregion

            #region draw the project region on the image
            if (homography != null)
            {
                //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)};
                homography.ProjectPoints(pts);

                for (int i = 0; i < pts.Length; i++)
                    pts[i].Y += modelImage.Height;

                res.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Gray(255.0), 5);
            }
            #endregion

            ImageViewer.Show(res, String.Format("Matched in {0} milliseconds", watch.ElapsedMilliseconds));
        }
Developer: samuto, Project: UnityOpenCV, Lines: 55, Source: SURFFeature.cs

Example 2: ProcessSurf

        private static Image<Bgr, byte> ProcessSurf(Image<Gray, byte> modelImage, Image<Gray, byte> grayFrame, Image<Bgr, byte> rawFrame)
        {
            SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref _surfParam);
            SURFFeature[] imageFeatures = grayFrame.ExtractSURF(ref _surfParam);

            SURFTracker tracker = new SURFTracker(modelFeatures);

            SURFTracker.MatchedSURFFeature[] matchedFeatures = tracker.MatchFeature(imageFeatures, 2, 20);
            matchedFeatures = SURFTracker.VoteForUniqueness(matchedFeatures, 0.8);
            matchedFeatures = SURFTracker.VoteForSizeAndOrientation(matchedFeatures, 1.5, 20);
            HomographyMatrix homography = SURFTracker.GetHomographyMatrixFromMatchedFeatures(matchedFeatures);

            //Image<Bgr, Byte> processedImage = modelImage.Convert<Bgr, Byte>().ConcateVertical(rawFrame);
            var processedImage = rawFrame;
            //DrawLines(modelImage, matchedFeatures, processedImage);

            if (homography != null)
            {
                //draw a rectangle along the projected model
                var rect = modelImage.ROI;
                var pts = new PointF[]
                                   {
                                       new PointF(rect.Left, rect.Bottom),
                                       new PointF(rect.Right, rect.Bottom),
                                       new PointF(rect.Right, rect.Top),
                                       new PointF(rect.Left, rect.Top)
                                   };
                homography.ProjectPoints(pts);

                //for (int i = 0; i < pts.Length; i++)
                //    pts[i].Y += modelImage.Height;

                processedImage.DrawPolyline(Array.ConvertAll(pts, Point.Round), true, new Bgr(Color.DarkOrange), 1);
            }
            return processedImage;
        }
Developer: genecyber, Project: PredatorCV, Lines: 36, Source: Surf.cs

Example 3: Run

      static void Run()
      {
         Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png");

         #region extract features from the object image
         MCvSURFParams param1 = new MCvSURFParams(500, false);
         SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref param1);
         SURFFeature[] modelFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
         SURFFeature[] modelFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });

         //Create feature trees for the given features
         FeatureTree featureTreePositiveLaplacian = new FeatureTree(
            Array.ConvertAll<SURFFeature, Matrix<float>>(
               modelFeaturesPositiveLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; }));
         FeatureTree featureTreeNegativeLaplacian = new FeatureTree(
            Array.ConvertAll<SURFFeature, Matrix<float>>(
               modelFeaturesNegativeLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; }));
         #endregion

         Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");

         #region extract features from the observed image
         MCvSURFParams param2 = new MCvSURFParams(500, false);
         SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref param2);
         SURFFeature[] imageFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
         SURFFeature[] imageFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });
         #endregion

         #region Merge the object image and the observed image into one image for display
         Image<Gray, Byte> res = new Image<Gray, byte>(Math.Max(modelImage.Width, observedImage.Width), modelImage.Height + observedImage.Height);
         res.ROI = new System.Drawing.Rectangle(0, 0, modelImage.Width, modelImage.Height);
         modelImage.Copy(res, null);
         res.ROI = new System.Drawing.Rectangle(0, modelImage.Height, observedImage.Width, observedImage.Height);
         observedImage.Copy(res, null);
         res.ROI = Rectangle.Empty;
         #endregion

         double matchDistanceRatio = 0.8;
         List<PointF> modelPoints = new List<PointF>();
         List<PointF> observePoints = new List<PointF>();

         #region using Feature Tree to match feature
         Matrix<float>[] imageFeatureDescriptorsPositiveLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
            imageFeaturesPositiveLaplacian,
            delegate(SURFFeature f) { return f.Descriptor; });
         Matrix<float>[] imageFeatureDescriptorsNegativeLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
            imageFeaturesNegativeLaplacian,
            delegate(SURFFeature f) { return f.Descriptor; });
         Matrix<Int32> result1;
         Matrix<double> dist1;

         featureTreePositiveLaplacian.FindFeatures(imageFeatureDescriptorsPositiveLaplacian, out result1, out dist1, 2, 20);
         MatchSURFFeatureWithFeatureTree(
           modelFeaturesPositiveLaplacian,
           imageFeaturesPositiveLaplacian,
           matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);

         featureTreeNegativeLaplacian.FindFeatures(imageFeatureDescriptorsNegativeLaplacian, out result1, out dist1, 2, 20);
         MatchSURFFeatureWithFeatureTree(
              modelFeaturesNegativeLaplacian,
              imageFeaturesNegativeLaplacian,
              matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);
         #endregion

         Matrix<float> homographyMatrix = CameraCalibration.FindHomography(
            modelPoints.ToArray(), //points on the object image
            observePoints.ToArray(), //points on the observed image
            HOMOGRAPHY_METHOD.RANSAC,
            3).Convert<float>();

         #region draw the projected object in observed image
         for (int i = 0; i < modelPoints.Count; i++)
         {
            PointF p = observePoints[i];
            p.Y += modelImage.Height;
            res.Draw(new LineSegment2DF(modelPoints[i], p), new Gray(0), 1);
         }

         System.Drawing.Rectangle rect = modelImage.ROI;
         Matrix<float> originalCornerCoordinate = new Matrix<float>(new float[,] 
            {{  rect.Left, rect.Bottom, 1.0f},
               { rect.Right, rect.Bottom, 1.0f},
               { rect.Right, rect.Top, 1.0f},
               { rect.Left, rect.Top, 1.0f}});

         Matrix<float> destCornerCoordinate = homographyMatrix * originalCornerCoordinate.Transpose();
         float[,] destCornerCoordinateArray = destCornerCoordinate.Data;

         Point[] destCornerPoints = new Point[4];
         for (int i = 0; i < destCornerPoints.Length; i++)
         {
            float denominator = destCornerCoordinateArray[2, i];
            destCornerPoints[i] = new Point(
               (int)(destCornerCoordinateArray[0, i] / denominator),
               (int)(destCornerCoordinateArray[1, i] / denominator) + modelImage.Height);
         }

         res.DrawPolyline(destCornerPoints, true, new Gray(255.0), 5);
//......... the remainder of this example is omitted .........
Developer: AnthonyNystrom, Project: Pikling, Lines: 101, Source: SURFFeature.cs
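
Example 3 projects the model's corner points by hand instead of calling HomographyMatrix.ProjectPoints: each corner is lifted to homogeneous coordinates, multiplied by the homography, and divided through by the third component. As a restatement of what the code above computes (not part of the original sample):

$$\begin{pmatrix} x' \\ y' \\ w \end{pmatrix} = H \begin{pmatrix} x \\ y \\ 1 \end{pmatrix}, \qquad (x, y) \mapsto \left(\frac{x'}{w},\ \frac{y'}{w}\right)$$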

Example 4: Run

        public Image<Gray, Byte> Run(int hessianThresh, bool extended, 
            int neighbours, int emax, 
            double uniquenessThreshold,
            double scaleIncrement, int rotBins, 
            bool show)
        {
            SURFDetector detector = new SURFDetector(hessianThresh, extended); //hessianThresh=500, extended=false

            Image<Bgr, Byte> modelImageBgr = new Image<Bgr, byte>(@"images\640x480\3_purple_oval_full_cropped.bmp");//.Convert<Gray, Byte>();
            Image<Gray, Byte> modelImage = modelImageBgr.Convert<Gray, Byte>();
            //extract features from the object image
            SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref detector);

            Image<Gray, Byte> observedImage = new Image<Gray, byte>(@"images\640x480\scene1.png");
            // extract features from the observed image
            SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref detector);

            //Create a SURF Tracker using k-d Tree
            SURFTracker tracker = new SURFTracker(modelFeatures);
            //Comment out above and uncomment below if you wish to use spill-tree instead
            //SURFTracker tracker = new SURFTracker(modelFeatures, 50, .7, .1);

            SURFTracker.MatchedSURFFeature[] matchedFeatures = tracker.MatchFeature(imageFeatures, neighbours, emax); //neighbours=2, emax=20
            matchedFeatures = SURFTracker.VoteForUniqueness(matchedFeatures, uniquenessThreshold);//uniquenessThreshold=0.8
            matchedFeatures = SURFTracker.VoteForSizeAndOrientation(matchedFeatures, scaleIncrement, rotBins); //scaleIncrement=1.5, rotBins=20
            HomographyMatrix homography = SURFTracker.GetHomographyMatrixFromMatchedFeatures(matchedFeatures);

            //Merge the object image and the observed image into one image for display
            Image<Gray, Byte> res = modelImage.ConcateHorizontal(observedImage);

            #region draw lines between the matched features
            foreach (SURFTracker.MatchedSURFFeature matchedFeature in matchedFeatures)
            {
                PointF p = matchedFeature.ObservedFeature.Point.pt;
                p.X += modelImage.Width;
                res.Draw(new LineSegment2DF(matchedFeature.SimilarFeatures[0].Feature.Point.pt, p), new Gray(0), 1);
            }
            #endregion

            #region draw the project region on the image
            if (homography != null)
            {  //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)};
                homography.ProjectPoints(pts);

                //shift into the observed (right-hand) half of the horizontally concatenated image
                for (int i = 0; i < pts.Length; i++)
                    pts[i].X += modelImage.Width;

                res.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Gray(255.0), 5);
            }
            #endregion

            if (show)
            {
                ImageViewer.Show(res);
            }

            return res;
        }
Developer: LoyVanBeek, Project: SetVision, Lines: 64, Source: FeatureCardDetector.cs

Example 5: MatchImages

        private static bool MatchImages(Image<Gray, Byte> observedImage, Bitmap reference)
        {
            MCvSURFParams surfParam = new MCvSURFParams(500, false);
            Image<Gray, Byte> modelImage = new Image<Gray, byte>(reference);

            //extract features from the object image
            SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref surfParam);

            //Create a SURF Tracker
            SURFTracker tracker = new SURFTracker(modelFeatures);

            // extract features from the observed image
            SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref surfParam);

            SURFTracker.MatchedSURFFeature[] matchedFeatures = tracker.MatchFeature(imageFeatures, 2, 20);
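            //note: 'accuracy' is a field of the surrounding class; the other examples in this article pass 0.8 here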
            matchedFeatures = SURFTracker.VoteForUniqueness(matchedFeatures, accuracy);
            matchedFeatures = SURFTracker.VoteForSizeAndOrientation(matchedFeatures, 1.5, 20);
            HomographyMatrix homography = SURFTracker.GetHomographyMatrixFromMatchedFeatures(matchedFeatures);

            //the model is considered found iff a homography could be estimated
            return homography != null;
        }
Developer: tuliosouza, Project: ASG, Lines: 28, Source: ImageHelper.cs
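
Example 5 reduces the whole matching pipeline to a yes/no answer. A hypothetical call site (the file names and surrounding code are assumptions for illustration, not taken from the original project):

            //compare a grayscale scene against a reference bitmap
            Image<Gray, Byte> scene = new Image<Gray, byte>("box_in_scene.png");
            using (Bitmap reference = new Bitmap("box.png"))
            {
                bool found = MatchImages(scene, reference); //true iff a homography was found
            }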

Example 6: TestSURFDetector

        public void TestSURFDetector()
        {
            Image<Gray, byte> box = new Image<Gray, byte>("box.png");
            MCvSURFParams detector = new MCvSURFParams(400, false);

            Stopwatch watch = Stopwatch.StartNew();
            ImageFeature[] features1 = detector.DetectFeatures(box, null);
            watch.Stop();
            Trace.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            watch.Reset(); watch.Start();
            MKeyPoint[] keypoints = detector.DetectKeyPoints(box, null);
            ImageFeature[] features2 = detector.ComputeDescriptors(box, null, keypoints);
            watch.Stop();
            Trace.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            watch.Reset(); watch.Start();
            SURFFeature[] features3 = box.ExtractSURF(ref detector);
            watch.Stop();
            Trace.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            PointF[] pts = Array.ConvertAll<MKeyPoint, PointF>(keypoints, delegate(MKeyPoint mkp) { return mkp.Point; });
            //SURFFeature[] features = box.ExtractSURF(pts, null, ref detector);
            //int count = features.Length;

            for (int i = 0; i < features1.Length; i++)
            {
                Assert.AreEqual(features1[i].KeyPoint.Point, features2[i].KeyPoint.Point);
                float[] d1 = features1[i].Descriptor;
                float[] d2 = features2[i].Descriptor;

                for (int j = 0; j < d1.Length; j++)
                    Assert.AreEqual(d1[j], d2[j]);
            }

            foreach (MKeyPoint kp in keypoints)
            {
                box.Draw(new CircleF(kp.Point, kp.Size), new Gray(255), 1);
            }
        }
Developer: samuto, Project: UnityOpenCV, Lines: 40, Source: AutoTestImage.cs

Example 7: TestSURF

        public void TestSURF()
        {
            for (int k = 0; k < 1; k++)
            {
                Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png");
                //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
                //modelImage = modelImage.Resize(400, 400, true);

                //modelImage._EqualizeHist();

                #region extract features from the object image
                Stopwatch stopwatch = Stopwatch.StartNew();
                MCvSURFParams param1 = new MCvSURFParams(500, false);
                SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref param1);
                SURFTracker tracker = new SURFTracker(modelFeatures);
                stopwatch.Stop();
                Trace.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
                Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");
                //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("left.jpg");
                //image = image.Resize(400, 400, true);

                //observedImage._EqualizeHist();
                #region extract features from the observed image
                stopwatch.Reset(); stopwatch.Start();
                MCvSURFParams param2 = new MCvSURFParams(500, false);
                SURFFeature[] observedFeatures = observedImage.ExtractSURF(ref param2);
                stopwatch.Stop();
                Trace.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Merge the object image and the observed image into one big image for display
                Image<Gray, Byte> res = modelImage.ConcateVertical(observedImage);

                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)};

                HomographyMatrix homography;

                stopwatch.Reset(); stopwatch.Start();
                homography = tracker.Detect(observedFeatures, 0.8);
                stopwatch.Stop();
                Trace.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                if (homography != null)
                {
                    PointF[] points = pts.Clone() as PointF[];
                    homography.ProjectPoints(points);

                    for (int i = 0; i < points.Length; i++)
                        points[i].Y += modelImage.Height;
                    res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                }

                stopwatch.Reset(); stopwatch.Start();
                //set the initial region to be the whole image
                using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
                {
                    priorMask.SetValue(1.0);
                    homography = tracker.CamShiftTrack(
                        observedFeatures,
                        (RectangleF)observedImage.ROI,
                        priorMask);
                }
                Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));

                if (homography != null) //set the initial tracking window to be the whole image
                {
                    PointF[] points = pts.Clone() as PointF[];
                    homography.ProjectPoints(points);

                    for (int i = 0; i < points.Length; i++)
                        points[i].Y += modelImage.Height;
                    res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                }
            }
        }
Developer: samuto, Project: UnityOpenCV, Lines: 83, Source: AutoTestImage.cs


Note: The Image.ExtractSURF method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.