

C# GpuMat.Download Method Code Examples

This article collects typical usage examples of the GpuMat.Download method in C#. If you are wondering what GpuMat.Download does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the GpuMat class, to which this method belongs.


Nine code examples of GpuMat.Download are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C# code examples.
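Before the individual examples, the snippet below sketches the basic round trip that GpuMat.Download completes: upload a host Mat to the GPU, process it there, and download the result back to host memory. This is a minimal sketch, assuming an Emgu CV 3.x build with CUDA support; it only reuses calls that appear in the examples below, and the random test image stands in for real input.

      // A minimal sketch of the Upload/Download round trip (assumes Emgu CV 3.x with CUDA).
      if (CudaInvoke.HasCuda)
      {
         // create a random BGR test image on the host (placeholder for real input)
         Mat cpuSrc = new Mat(new Size(480, 320), DepthType.Cv8U, 3);
         CvInvoke.Randu(cpuSrc, new MCvScalar(), new MCvScalar(255, 255, 255));

         using (GpuMat gpuSrc = new GpuMat())
         using (GpuMat gpuGray = new GpuMat())
         using (Mat cpuResult = new Mat())
         {
            gpuSrc.Upload(cpuSrc);                                           // host -> device copy
            CudaInvoke.CvtColor(gpuSrc, gpuGray, ColorConversion.Bgr2Gray);  // process on the GPU
            gpuGray.Download(cpuResult);                                     // device -> host copy
            // cpuResult now holds the grayscale result in ordinary host memory
         }
      }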

Example 1: FindMatch

        public static void FindMatch(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix<int> indices, out Matrix<byte> mask, out HomographyMatrix homography)
        {
            int k = 2;
            double uniquenessThreshold = 0.8;
            SURFDetector surfCPU = new SURFDetector(500, false);
            Stopwatch watch;
            homography = null;
            #if !IOS
            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                //extract features from the object image
                using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
                using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float>(DistanceType.L2))
                {
                    modelKeyPoints = new VectorOfKeyPoint();
                    surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                    watch = Stopwatch.StartNew();

                    //extract features from the observed image
                    using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
                    using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                    using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                    using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<Byte> gpuMask = new GpuMat<byte>(gpuMatchIndices.Size.Height, 1, 1))
                    using (Stream stream = new Stream())
                    {
                        matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                        indices = new Matrix<int>(gpuMatchIndices.Size);
                        mask = new Matrix<byte>(gpuMask.Size);

                        //GPU implementation of VoteForUniqueness
                        using (GpuMat<float> col0 = gpuMatchDist.Col(0))
                        using (GpuMat<float> col1 = gpuMatchDist.Col(1))
                        {
                            GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                            GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                        }

                        observedKeyPoints = new VectorOfKeyPoint();
                        surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                        //wait for the stream to complete its tasks;
                        //we can perform other CPU-intensive work here while waiting for the stream to complete
                        stream.WaitForCompletion();

                        gpuMask.Download(mask);
                        gpuMatchIndices.Download(indices);

                        if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                        {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }

                        watch.Stop();
                    }
                }
            }
            else
            #endif
            {
                //extract features from the object image
                modelKeyPoints = new VectorOfKeyPoint();
                Matrix<float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                //extract features from the observed image
                observedKeyPoints = new VectorOfKeyPoint();
                Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
                BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix<int>(observedDescriptors.Rows, k);
                using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }

                watch.Stop();
            }
            matchTime = watch.ElapsedMilliseconds;
        }
Author: Huong-nt, Project: SUFT-detecttion-EMGU, Lines: 99, Source: DrawMatches.cs
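This example illustrates the typical asynchronous pattern around GpuMat.Download: the k-NN matching and the uniqueness test are queued on a Stream, the keypoints are downloaded while the GPU is still busy, and the match indices and mask are downloaded only after stream.WaitForCompletion() returns.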

Example 2: Draw

        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Image<Bgr, Byte> Draw(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime)
        {
            Stopwatch watch;
            HomographyMatrix homography = null;

            SURFDetector surfCPU = new SURFDetector (500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;

            Matrix<byte> mask;
            int k = 2;
            double uniquenessThreshold = 0.8;
            if (GpuInvoke.HasCuda) {
                GpuSURFDetector surfGPU = new GpuSURFDetector (surfCPU.SURFParams, 0.01f);
                using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte> (modelImage))
                    //extract features from the object image
                using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw (gpuModelImage, null))
                using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw (gpuModelImage, null, gpuModelKeyPoints))
                using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float> (DistanceType.L2)) {
                    modelKeyPoints = new VectorOfKeyPoint ();
                    surfGPU.DownloadKeypoints (gpuModelKeyPoints, modelKeyPoints);
                    watch = Stopwatch.StartNew ();

                    // extract features from the observed image
                    using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte> (observedImage))
                    using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw (gpuObservedImage, null))
                    using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw (gpuObservedImage, null, gpuObservedKeyPoints))
                    using (GpuMat<int> gpuMatchIndices = new GpuMat<int> (gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<float> gpuMatchDist = new GpuMat<float> (gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<Byte> gpuMask = new GpuMat<byte> (gpuMatchIndices.Size.Height, 1, 1))
                    using (Stream stream = new Stream ()) {
                        matcher.KnnMatchSingle (gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                        indices = new Matrix<int> (gpuMatchIndices.Size);
                        mask = new Matrix<byte> (gpuMask.Size);

                        //GPU implementation of VoteForUniqueness
                        using (GpuMat<float> col0 = gpuMatchDist.Col (0))
                        using (GpuMat<float> col1 = gpuMatchDist.Col (1)) {
                            GpuInvoke.Multiply (col1, new MCvScalar (uniquenessThreshold), col1, stream);
                            GpuInvoke.Compare (col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                        }

                        observedKeyPoints = new VectorOfKeyPoint ();
                        surfGPU.DownloadKeypoints (gpuObservedKeyPoints, observedKeyPoints);

                        //wait for the stream to complete its tasks;
                        //we can perform other CPU-intensive work here while waiting for the stream to complete
                        stream.WaitForCompletion ();

                        gpuMask.Download (mask);
                        gpuMatchIndices.Download (indices);

                        if (GpuInvoke.CountNonZero (gpuMask) >= 4) {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation (modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures (modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }

                        watch.Stop ();
                    }
                }
            } else {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw (modelImage, null);
                Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw (modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew ();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw (observedImage, null);
                Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw (observedImage, null, observedKeyPoints);
                BruteForceMatcher<float> matcher = new BruteForceMatcher<float> (DistanceType.L2);
                matcher.Add (modelDescriptors);

                indices = new Matrix<int> (observedDescriptors.Rows, k);
                using (Matrix<float> dist = new Matrix<float> (observedDescriptors.Rows, k)) {
                    matcher.KnnMatch (observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte> (dist.Rows, 1);
                    mask.SetValue (255);
                    Features2DToolbox.VoteForUniqueness (dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero (mask);
                if (nonZeroCount >= 4) {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation (modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures (modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }

                watch.Stop ();
            }

//......... the rest of this code is omitted .........
Author: kenlimmj, Project: quetzalcoatl, Lines: 101, Source: MyPage.xaml.cs

Example 3: TestPyrLK

      /*
      public void TestPyrLK()
      {
         const int MAX_CORNERS = 500;
         Capture c = new Capture();
         ImageViewer viewer = new ImageViewer();
         Image<Gray, Byte> oldImage = null;
         Image<Gray, Byte> currentImage = null;
         Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
         {
            if (oldImage == null)
            {
               oldImage = c.QueryGrayFrame();
            }

            currentImage = c.QueryGrayFrame();
            Features2D.GFTTDetector detector = new Features2D.GFTTDetector(MAX_CORNERS, 0.05, 3, 3);
            
            //PointF[] features = oldImage.GoodFeaturesToTrack(MAX_CORNERS, 0.05, 3.0, 3, false, 0.04)[0];
            PointF[] shiftedFeatures;
            Byte[] status;
            float[] trackErrors;
            CvInvoke.CalcOpticalFlowPyrLK(oldImage, currentImage, features, new Size(9, 9), 3, new MCvTermCriteria(20, 0.05),
               out shiftedFeatures, out status, out trackErrors);

            Image<Gray, Byte> displayImage = currentImage.Clone();
            for (int i = 0; i < features.Length; i++)
               displayImage.Draw(new LineSegment2DF(features[i], shiftedFeatures[i]), new Gray(), 2);

            oldImage = currentImage;
            viewer.Image = displayImage;
         });
         viewer.ShowDialog();
      }*/

     
      public void TestPyrLKGPU()
      {
         if (!CudaInvoke.HasCuda)
            return;

         const int MAX_CORNERS = 500;
         Capture c = new Capture();
         ImageViewer viewer = new ImageViewer();
         GpuMat oldImage = null;
         GpuMat currentImage = null;
         using (CudaGoodFeaturesToTrackDetector detector = new CudaGoodFeaturesToTrackDetector(DepthType.Cv8U, 1, MAX_CORNERS, 0.05, 3.0, 3, false, 0.04))
         using (CudaDensePyrLKOpticalFlow flow = new CudaDensePyrLKOpticalFlow(new Size(21, 21), 3, 30, false))
         {
            Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
            {
               if (oldImage == null)
               {
                  Mat bgrFrame = c.QueryFrame();
                  using (GpuMat oldBgrImage = new GpuMat(bgrFrame))
                  {
                     oldImage = new GpuMat();
                     CudaInvoke.CvtColor(oldBgrImage, oldImage, ColorConversion.Bgr2Gray);
                  }
               }

               using (Mat tmpFrame = c.QueryFrame())
               using (GpuMat tmp = new GpuMat(tmpFrame))
               {
                  currentImage = new GpuMat();
                  CudaInvoke.CvtColor(tmp, currentImage, ColorConversion.Bgr2Gray);
               }
               using (GpuMat f = new GpuMat())
               using (GpuMat vertex = new GpuMat())
               using (GpuMat colors = new GpuMat())
               using (GpuMat corners = new GpuMat())
               {
                  flow.Calc(oldImage, currentImage, f);

                  //CudaInvoke.CreateOpticalFlowNeedleMap(u, v, vertex, colors);
                  detector.Detect(oldImage, corners, null);
                  //GpuMat<float> detector.Detect(oldImage, null);
                  /*
                  //PointF[] features = oldImage.GoodFeaturesToTrack(MAX_CORNERS, 0.05, 3.0, 3, false, 0.04)[0];
                  PointF[] shiftedFeatures;
                  Byte[] status;
                  float[] trackErrors;
                  OpticalFlow.PyrLK(oldImage, currentImage, features, new Size(9, 9), 3, new MCvTermCriteria(20, 0.05),
                     out shiftedFeatures, out status, out trackErrors);
                  */

                  Mat displayImage = new Mat();
                  currentImage.Download(displayImage);
                      
                  /*
                  for (int i = 0; i < features.Length; i++)
                     displayImage.Draw(new LineSegment2DF(features[i], shiftedFeatures[i]), new Gray(), 2);
                  */
                  oldImage = currentImage;
                  viewer.Image = displayImage;
               }
            });
            viewer.ShowDialog();
         }
//......... the rest of this code is omitted .........
Author: Warren-GH, Project: emgucv, Lines: 101, Source: Class1.cs

Example 4: Solve

        public Image<Gray, byte> Solve(Image<Gray, byte> left, Image<Gray, byte> right)
        {
            var size = left.Size;

            using (var leftGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var rightGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var disparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var filteredDisparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var filteredDisparity16S = new Mat(size, DepthType.Cv16S, 1))
            using (var filteredDisparity8U = new Mat(size, DepthType.Cv8U, 1))
            {
                leftGpu.Upload(left.Mat);
                rightGpu.Upload(right.Mat);

                algorithm.FindStereoCorrespondence(leftGpu, rightGpu, disparityGpu);

                filter.Apply(disparityGpu, leftGpu, filteredDisparityGpu);

                filteredDisparityGpu.Download(filteredDisparity16S);

                CvInvoke.MinMaxLoc(filteredDisparity16S, ref min, ref max, ref minPosition, ref maxPosition);

                filteredDisparity16S.ConvertTo(filteredDisparity8U, DepthType.Cv8U, 255.0/(Max - Min));

                return new Image<Gray, byte>(filteredDisparity8U.Bitmap);
            }
        }
Author: rachwal, Project: RTM-Tools, Lines: 27, Source: GPUDisparitySolver.cs
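Here Download moves the filtered 16-bit disparity map back to host memory so that the CPU-side MinMaxLoc and ConvertTo calls can rescale it to an 8-bit image for display.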

Example 5: FindModelImageInObservedImage

        public static bool FindModelImageInObservedImage( Image<Gray, byte> modelImage, Image<Gray, byte> observedImage )
        {
            var surfCpu = new SURFDetector(500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;

            Matrix<byte> mask;
            int k = 2;
            double uniquenessThreshold = 0.8;
            if ( GpuInvoke.HasCuda )
            {
                GpuSURFDetector surfGpu = new GpuSURFDetector(surfCpu.SURFParams, 0.01f);
                //extract features from the object image
                using ( GpuImage<Gray, byte> gpuModelImage = new GpuImage<Gray, byte>( modelImage ) )
                using ( GpuMat<float> gpuModelKeyPoints = surfGpu.DetectKeyPointsRaw( gpuModelImage, null ) )
                using ( GpuMat<float> gpuModelDescriptors = surfGpu.ComputeDescriptorsRaw( gpuModelImage, null, gpuModelKeyPoints ) )
                using ( GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float>( DistanceType.L2 ) )
                {
                    modelKeyPoints = new VectorOfKeyPoint();
                    surfGpu.DownloadKeypoints( gpuModelKeyPoints, modelKeyPoints );

                    //extract features from the observed image
                    using ( GpuImage<Gray, byte> gpuObservedImage = new GpuImage<Gray, byte>( observedImage ) )
                    using ( GpuMat<float> gpuObservedKeyPoints = surfGpu.DetectKeyPointsRaw( gpuObservedImage, null ) )
                    using ( GpuMat<float> gpuObservedDescriptors = surfGpu.ComputeDescriptorsRaw( gpuObservedImage, null, gpuObservedKeyPoints ) )
                    using ( GpuMat<int> gpuMatchIndices = new GpuMat<int>( gpuObservedDescriptors.Size.Height, k, 1, true ) )
                    using ( GpuMat<float> gpuMatchDist = new GpuMat<float>( gpuObservedDescriptors.Size.Height, k, 1, true ) )
                    using ( GpuMat<Byte> gpuMask = new GpuMat<byte>( gpuMatchIndices.Size.Height, 1, 1 ) )
                    using ( var stream = new Emgu.CV.GPU.Stream() )
                    {
                        matcher.KnnMatchSingle( gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream );
                        indices = new Matrix<int>( gpuMatchIndices.Size );
                        mask = new Matrix<byte>( gpuMask.Size );

                        //GPU implementation of VoteForUniqueness
                        using ( GpuMat<float> col0 = gpuMatchDist.Col( 0 ) )
                        using ( GpuMat<float> col1 = gpuMatchDist.Col( 1 ) )
                        {
                            GpuInvoke.Multiply( col1, new MCvScalar( uniquenessThreshold ), col1, stream );
                            GpuInvoke.Compare( col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream );
                        }

                        observedKeyPoints = new VectorOfKeyPoint();
                        surfGpu.DownloadKeypoints( gpuObservedKeyPoints, observedKeyPoints );

                        //wait for the stream to complete its tasks;
                        //we can perform other CPU-intensive work here while waiting for the stream to complete
                        stream.WaitForCompletion();

                        gpuMask.Download( mask );
                        gpuMatchIndices.Download( indices );

                        if ( GpuInvoke.CountNonZero( gpuMask ) >= 4 )
                        {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation( modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20 );
                            if ( nonZeroCount >= 4 )
                            {
                                Features2DToolbox.GetHomographyMatrixFromMatchedFeatures( modelKeyPoints, observedKeyPoints, indices, mask, 2 );
                            }
                            if ( (double)nonZeroCount / mask.Height > 0.02 )
                            {
                                return true;
                            }
                        }
                    }
                }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = surfCpu.DetectKeyPointsRaw( modelImage, null );
                Matrix<float> modelDescriptors = surfCpu.ComputeDescriptorsRaw( modelImage, null, modelKeyPoints );

                //extract features from the observed image
                observedKeyPoints = surfCpu.DetectKeyPointsRaw( observedImage, null );
                Matrix<float> observedDescriptors = surfCpu.ComputeDescriptorsRaw( observedImage, null, observedKeyPoints );
                BruteForceMatcher<float> matcher = new BruteForceMatcher<float>( DistanceType.L2 );
                matcher.Add( modelDescriptors );

                indices = new Matrix<int>( observedDescriptors.Rows, k );
                using ( Matrix<float> dist = new Matrix<float>( observedDescriptors.Rows, k ) )
                {
                    matcher.KnnMatch( observedDescriptors, indices, dist, k, null );
                    mask = new Matrix<byte>( dist.Rows, 1 );
                    mask.SetValue( 255 );
                    Features2DToolbox.VoteForUniqueness( dist, uniquenessThreshold, mask );
                }

                int nonZeroCount = CvInvoke.cvCountNonZero( mask );
                if ( nonZeroCount >= 4 )
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation( modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20 );
                    if ( nonZeroCount >= 4 )
                    {
                        Features2DToolbox.GetHomographyMatrixFromMatchedFeatures( modelKeyPoints, observedKeyPoints, indices, mask, 2 );
                    }
                }

                if ( (double)nonZeroCount / mask.Height > 0.02 )
//......... the rest of this code is omitted .........
Author: dmarkachev, Project: CSE803Project, Lines: 101, Source: SurfClassifier.cs

Example 6: TestCudaPyrLKOpticalFlow

      public void TestCudaPyrLKOpticalFlow()
      {
         if (!CudaInvoke.HasCuda)
            return;
         Image<Gray, Byte> prevImg, currImg;
         AutoTestVarious.OpticalFlowImage(out prevImg, out currImg);
         Mat flow = new Mat();
         CudaDensePyrLKOpticalFlow opticalflow = new CudaDensePyrLKOpticalFlow(new Size(21, 21), 3, 30, false);
         using (CudaImage<Gray, Byte> prevGpu = new CudaImage<Gray, byte>(prevImg))
         using (CudaImage<Gray, byte> currGpu = new CudaImage<Gray, byte>(currImg))
         using (GpuMat flowGpu = new GpuMat())
         {
            opticalflow.Calc(prevGpu, currGpu, flowGpu);

            flowGpu.Download(flow);
         }
         
      }
Author: Warren-GH, Project: emgucv, Lines: 18, Source: AutoTestCuda.cs

Example 7: TestSplitMerge

      public void TestSplitMerge()
      {
         if (CudaInvoke.HasCuda)
         {
            using (Image<Bgr, Byte> img1 = new Image<Bgr, byte>(1200, 640))
            {
               img1.SetRandUniform(new MCvScalar(0, 0, 0), new MCvScalar(255, 255, 255));

               using (GpuMat gpuImg1 = new GpuMat(img1))
               {
                  GpuMat[] channels = gpuImg1.Split(null);

                  for (int i = 0; i < channels.Length; i++)
                  {
                     Mat imgL = channels[i].ToMat();
                     Image<Gray, Byte> imgR = img1[i];
                     Assert.IsTrue(imgL.Equals(imgR.Mat), "failed split GpuMat");
                  }

                  using (GpuMat gpuImg2 = new GpuMat())
                  {
                     gpuImg2.MergeFrom(channels, null);
                     using (Image<Bgr, byte> img2 = new Image<Bgr, byte>(img1.Size))
                     {
                        gpuImg2.Download(img2);
                        Assert.IsTrue(img2.Equals(img1), "failed split and merge test");
                     }
                  }

                  for (int i = 0; i < channels.Length; i++)
                  {
                     channels[i].Dispose();
                  }
               }
            }
         }
      }
Author: Warren-GH, Project: emgucv, Lines: 37, Source: AutoTestCuda.cs

Example 8: TestCudaBroxOpticalFlow

      public void TestCudaBroxOpticalFlow()
      {
         if (!CudaInvoke.HasCuda)
            return;
         Image<Gray, Byte> prevImg, currImg;
         AutoTestVarious.OpticalFlowImage(out prevImg, out currImg);
         Mat flow = new Mat();
         CudaBroxOpticalFlow opticalflow = new CudaBroxOpticalFlow();
         using (CudaImage<Gray, float> prevGpu = new CudaImage<Gray, float>(prevImg.Convert<Gray, float>()))
         using (CudaImage<Gray, float> currGpu = new CudaImage<Gray, float>(currImg.Convert<Gray, float>()))
         using (GpuMat flowGpu = new GpuMat())
         {
            opticalflow.Calc(prevGpu, currGpu, flowGpu);

            flowGpu.Download(flow);
         }
      }
Author: neutmute, Project: emgucv, Lines: 17, Source: AutoTestCuda.cs

Example 9: TestCudaUploadDownload

      public void TestCudaUploadDownload()
      {
         if (!CudaInvoke.HasCuda)
            return;

         Mat m = new Mat(new Size(480, 320), DepthType.Cv8U, 3);
         CvInvoke.Randu(m, new MCvScalar(), new MCvScalar(255, 255, 255) );

         #region test for async download & upload
         Stream stream = new Stream();
         GpuMat gm1 = new GpuMat();
         gm1.Upload(m, stream);

         Mat m2 = new Mat();
         gm1.Download(m2, stream);

         stream.WaitForCompletion();
         EmguAssert.IsTrue(m.Equals(m2));
         #endregion

         #region test for blocking download & upload
         GpuMat gm2 = new GpuMat();
         gm2.Upload(m);
         Mat m3 = new Mat();
         gm2.Download(m3);
         EmguAssert.IsTrue(m.Equals(m3));
         #endregion
      }
Author: neutmute, Project: emgucv, Lines: 28, Source: AutoTestCuda.cs
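This test makes the contract explicit: with the asynchronous Upload/Download overloads, m2 is only safe to read after stream.WaitForCompletion() returns, while the blocking overloads in the second region need no extra synchronization.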


Note: The GpuMat.Download method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the copyright of the source code remains with the original authors. Before distributing or using the code, please refer to the corresponding project's license; do not reproduce without permission.