

C# Mat.ToUMat Method Code Examples

This article collects typical usage examples of the Mat.ToUMat method in C#. If you have been asking yourself how C# Mat.ToUMat works, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the Mat class, to which this method belongs.


Six code examples of the Mat.ToUMat method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
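Before the full examples, here is a minimal sketch of what Mat.ToUMat does: it exposes a Mat as a UMat, so that subsequent CvInvoke calls can run through OpenCL transparently when a compatible device is available. This is only an illustrative sketch assuming Emgu CV 3.x; "input.jpg" is a placeholder file name.

   // Load an image into a Mat (host memory). "input.jpg" is a placeholder path.
   Mat input = new Mat("input.jpg", LoadImageType.AnyColor);

   // Expose it as a UMat; AccessType.Read indicates we only read the data back.
   using (UMat uInput = input.ToUMat(AccessType.Read))
   using (UMat gray = new UMat())
   {
      // CvInvoke calls on UMat use OpenCL automatically when it is available.
      CvInvoke.CvtColor(uInput, gray, ColorConversion.Bgr2Gray);
   }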

Example 1: FindMatch

      public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
      {
         int k = 2;
         double uniquenessThreshold = 0.8;
         double hessianThresh = 300;
         
         Stopwatch watch;
         homography = null;

         modelKeyPoints = new VectorOfKeyPoint();
         observedKeyPoints = new VectorOfKeyPoint();

         #if !__IOS__
         if ( CudaInvoke.HasCuda)
         {
            CudaSURF surfCuda = new CudaSURF((float) hessianThresh);
            using (GpuMat gpuModelImage = new GpuMat(modelImage))
            //extract features from the object image
            using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
            using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
            {
               surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
               watch = Stopwatch.StartNew();

               // extract features from the observed image
               using (GpuMat gpuObservedImage = new GpuMat(observedImage))
               using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
               using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
               //using (GpuMat tmp = new GpuMat())
               //using (Stream stream = new Stream())
               {
                  matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                  surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                  mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                  mask.SetTo(new MCvScalar(255));
                  Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                  int nonZeroCount = CvInvoke.CountNonZero(mask);
                  if (nonZeroCount >= 4)
                  {
                     nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                        matches, mask, 1.5, 20);
                     if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                           observedKeyPoints, matches, mask, 2);
                  }
               }
               watch.Stop();
            }
         }
         else
         #endif
         {
            using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
            using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
            {
               SURF surfCPU = new SURF(hessianThresh);
               //extract features from the object image
               UMat modelDescriptors = new UMat();
               surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

               watch = Stopwatch.StartNew();

               // extract features from the observed image
               UMat observedDescriptors = new UMat();
               surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
               BFMatcher matcher = new BFMatcher(DistanceType.L2);
               matcher.Add(modelDescriptors);

               matcher.KnnMatch(observedDescriptors, matches, k, null);
               mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
               mask.SetTo(new MCvScalar(255));
               Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

               int nonZeroCount = CvInvoke.CountNonZero(mask);
               if (nonZeroCount >= 4)
               {
                  nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                     matches, mask, 1.5, 20);
                  if (nonZeroCount >= 4)
                     homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                        observedKeyPoints, matches, mask, 2);
               }

               watch.Stop();
            }
         }
         matchTime = watch.ElapsedMilliseconds;
      }
Developer: Delaley | Project: emgucv | Lines of code: 92 | Source: DrawMatches.cs
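A hedged usage sketch for FindMatch follows; the file names are placeholders, and the visualization uses Features2DToolbox.DrawMatches from the same toolbox used above.

   Mat modelImage = new Mat("model.png", LoadImageType.Grayscale);       // placeholder path
   Mat observedImage = new Mat("observed.png", LoadImageType.Grayscale); // placeholder path

   long matchTime;
   VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
   Mat mask, homography;
   using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
   {
      FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints,
         out observedKeyPoints, matches, out mask, out homography);

      // Visualize the matches; homography is null when too few matches survived the votes.
      Mat result = new Mat();
      Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
         matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
   }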

Example 2: calculatedescriptors

 private void calculatedescriptors(Mat image, UMat imageDescriptors, VectorOfKeyPoint imageKeyPoints)
 {
     using (UMat mImage = image.ToUMat(Emgu.CV.CvEnum.AccessType.Read))
     using (SIFT sift = new SIFT())
     {
         // Detect SIFT key points and compute their descriptors in a single pass.
         sift.DetectAndCompute(mImage, null, imageKeyPoints, imageDescriptors, false);
     }
 }
Developer: aliluczak | Project: projektLWKForms | Lines of code: 8 | Source: Form1.cs
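A short usage sketch, assuming the surrounding form already holds an image as a Mat; the file name is a placeholder.

   Mat image = new Mat("scene.jpg", LoadImageType.AnyColor); // placeholder path
   UMat descriptors = new UMat();
   VectorOfKeyPoint keyPoints = new VectorOfKeyPoint();
   calculatedescriptors(image, descriptors, keyPoints);
   // keyPoints and descriptors are now populated and can be fed to a matcher such as BFMatcher.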

Example 3: LicensePlateRecognitionForm

 public LicensePlateRecognitionForm()
 {
     InitializeComponent();
      _licensePlateDetector = new LicensePlateDetector("");
      Mat m = new Mat("license-plate.jpg", LoadImageType.AnyColor);
      UMat um = m.ToUMat(AccessType.ReadWrite);
      imageBox1.Image = um;
      ProcessImage(m);
 }
Developer: tonsville | Project: Anpr-Project | Lines of code: 9 | Source: LicensePlateRecognitionForm.cs

Example 4: Find

        /// <summary>
        /// Find the pedestrians in the image
        /// </summary>
        /// <param name="image">The image</param>
        /// <param name="tryUseCuda">If true, use CUDA for the detection when a compatible device is available</param>
        /// <param name="tryUseOpenCL">If true, use OpenCL for the detection when a compatible GPU device is available</param>
        /// <param name="processingTime">The pedestrian detection time in milliseconds</param>
        /// <returns>The regions where pedestrians are detected</returns>
        public static Rectangle[] Find(Mat image, bool tryUseCuda, bool tryUseOpenCL, out long processingTime)
        {
            Stopwatch watch;
            Rectangle[] regions;

#if !(IOS || NETFX_CORE)
            //check if there is a compatible Cuda device to run pedestrian detection
            if (tryUseCuda && CudaInvoke.HasCuda)
            {  //this is the Cuda version
                using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
                {
                    des.SetSVMDetector(des.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();
                    using (GpuMat cudaBgr = new GpuMat(image))
                    using (GpuMat cudaBgra = new GpuMat())
                    using (VectorOfRect vr = new VectorOfRect())
                    {
                        CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                        des.DetectMultiScale(cudaBgra, vr);
                        regions = vr.ToArray();
                    }
                }
            }
            else
#endif
            {
                //Many OpenCL functions require OpenCL compatible GPU devices.
                //As of OpenCV 3.0-alpha, OpenCV will crash if OpenCL is enabled and only an OpenCL compatible CPU device is present.
                //So we call CvInvoke.HaveOpenCLCompatibleGpuDevice instead of CvInvoke.HaveOpenCL (which also returns true on a system that only has CPU OpenCL devices).
                CvInvoke.UseOpenCL = tryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice;

                //this is the CPU/OpenCL version
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

                    //load the image into a UMat so it will automatically use OpenCL if available
                    UMat umat = image.ToUMat(AccessType.Read);

                    watch = Stopwatch.StartNew();

                    MCvObjectDetection[] results = des.DetectMultiScale(umat);
                    regions = new Rectangle[results.Length];
                    for (int i = 0; i < results.Length; i++)
                        regions[i] = results[i].Rect;
                    watch.Stop();
                }
            }

            processingTime = watch.ElapsedMilliseconds;

            return regions;
        }
Developer: mldasilva | Project: FireKam | Lines of code: 60 | Source: FindPedestrian.cs
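A hedged usage sketch for Find, drawing the detected regions back onto the image; the file name is a placeholder.

   Mat image = new Mat("street.jpg", LoadImageType.AnyColor); // placeholder path
   long processingTime;
   Rectangle[] regions = Find(image, true, true, out processingTime);
   foreach (Rectangle region in regions)
      CvInvoke.Rectangle(image, region, new MCvScalar(0, 0, 255), 2); // red in BGR order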

Example 5: Find

      /// <summary>
      /// Find the pedestrians in the image
      /// </summary>
      /// <param name="image">The image</param>
      /// <param name="tryUseCuda">If true, use CUDA for the detection when a compatible device is available</param>
      /// <param name="processingTime">The pedestrian detection time in milliseconds</param>
      /// <returns>The regions where pedestrians are detected</returns>
      public static Rectangle[] Find(Mat image, bool tryUseCuda, out long processingTime)
      {
         Stopwatch watch;
         Rectangle[] regions;

#if !(__IOS__ || NETFX_CORE)
         //check if there is a compatible Cuda device to run pedestrian detection
         if (tryUseCuda && CudaInvoke.HasCuda)
         {  //this is the Cuda version
            using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8,8), new Size(8,8)))
            {
               des.SetSVMDetector(des.GetDefaultPeopleDetector());

               watch = Stopwatch.StartNew();
               using (GpuMat cudaBgr = new GpuMat(image))
               using (GpuMat cudaBgra = new GpuMat() )
               using (VectorOfRect vr = new VectorOfRect())
               {
                  CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                  des.DetectMultiScale(cudaBgra, vr);
                  regions = vr.ToArray();
               }
            }
         }
         else
#endif
         {  
            //this is the CPU/OpenCL version
            using (HOGDescriptor des = new HOGDescriptor())
            {
               des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
               
               //load the image into a UMat so it will automatically use OpenCL if available
               UMat umat = image.ToUMat(AccessType.Read);

               watch = Stopwatch.StartNew();
               
               MCvObjectDetection[] results = des.DetectMultiScale(umat);
               regions = new Rectangle[results.Length];
               for (int i = 0; i < results.Length; i++)
                  regions[i] = results[i].Rect;
               watch.Stop();
            }
         }
        
         processingTime = watch.ElapsedMilliseconds;

         return regions;
      }
Developer: Delaley | Project: emgucv | Lines of code: 55 | Source: FindPedestrian.cs

Example 6: GetMaskedBitmap

        private Drawing.Bitmap GetMaskedBitmap(string imagePath, IList<Point> pointCollection)
        {
            Mat matrix = new Mat(imagePath, LoadImageType.AnyColor);
            UMat uMatrix = matrix.ToUMat(AccessType.ReadWrite);

            // Scale Polygon
            List<Point> scaledPoints = GetScaledPoints(pointCollection, uMatrix.Rows, uMatrix.Cols);

            polygonPoints = GetPolygonPoints(scaledPoints, uMatrix.Rows, uMatrix.Cols);

            // Apply Polygon
            using (VectorOfPoint vPoint = new VectorOfPoint(polygonPoints.ToArray()))
            using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint))
            {
                CvInvoke.FillPoly(uMatrix, vvPoint, new Bgr(0, 0, 0).MCvScalar);
            }

            // Crop Bitmap
            int left = (int)scaledPoints.Min(p => p.X);
            int top = (int)scaledPoints.Min(p => p.Y);
            int width = (int)scaledPoints.Max(p => p.X) - left;
            int height = (int)scaledPoints.Max(p => p.Y) - top;

            Image<Bgr, byte> image = new Image<Bgr, byte>(uMatrix.Bitmap);
            image.ROI = new Drawing.Rectangle(left, top, width, height);

            return image.Bitmap;
        }
Developer: uzapy | Project: ch.bfh.bti7302.w2015.schneedetektion | Lines of code: 28 | Source: OpenCVHelper.cs
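A hypothetical usage sketch: GetMaskedBitmap is private, so this assumes a caller inside the same helper class; the file name and coordinate values are illustrative, and the points are rescaled internally by the GetScaledPoints helper, which is not shown above.

   // Hypothetical polygon; actual coordinate conventions depend on GetScaledPoints.
   IList<Point> points = new List<Point>
   {
      new Point(10, 10), new Point(200, 10), new Point(200, 150), new Point(10, 150)
   };
   Drawing.Bitmap cropped = GetMaskedBitmap("snow-camera.jpg", points); // placeholder path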


Note: The Mat.ToUMat examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not republish without permission.