

C# Matrix.SetValue Method Code Examples

This article collects and summarizes typical usage examples of the C# System.Matrix.SetValue method. If you are asking yourself what Matrix.SetValue does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples of the containing class, System.Matrix.


The sections below show 15 code examples of the Matrix.SetValue method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
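
Before the examples, note that the SetValue overload being called differs by library. In the Emgu CV examples, Matrix&lt;TDepth&gt;.SetValue(double) fills every element of the matrix with a single scalar (that is how the match masks are initialised to 255), while the GSL.NET- and NGenerics-style examples call SetValue(row, column, value) to write one cell at a time. The minimal sketch below illustrates both patterns; it is only an orientation aid, and it assumes an Emgu CV 2.x reference plus an NGenerics-style Matrix class (the namespace alias is an assumption, not taken from the examples).

using Emgu.CV;                                                 // assumed: Emgu CV 2.x, provides Matrix<TDepth>
using NMatrix = NGenerics.DataStructures.Mathematical.Matrix;  // assumed namespace for an element-wise Matrix

class SetValueSketch
{
    static void Main()
    {
        // Emgu CV pattern (Examples 1, 2, 5-7, 9-11, 14, 15):
        // SetValue(double) assigns the same scalar to every element.
        var mask = new Matrix<byte>(5, 1);
        mask.SetValue(255);

        // Element-wise pattern (Examples 3, 4, 8, 12, 13):
        // SetValue(row, column, value) writes a single cell.
        var m = new NMatrix(2, 2);
        m.SetValue(0, 0, 1.0);
        m.SetValue(1, 1, 1.0);   // diagonal set to 1; other cells keep their default value
    }
}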

Example 1: TestNot

      public void TestNot()
      {
         Matrix<byte> m = new Matrix<byte>(10, 8);
         m.SetValue(1.0);
         m._Not();
         byte[,] d2 = m.Data;

         foreach (byte v in d2)
            EmguAssert.IsTrue(254.0 == v);
      }
Developer: reidblomquist, Project: emgucv, Lines of code: 10, Source file: AutoTestMatrix.cs

Example 2: Evaluate

        public void Evaluate(int SpreadMax)
        {
            FStatus.SliceCount = SpreadMax;
            FOutPositions1.SliceCount = SpreadMax;
            FOutPositions2.SliceCount = SpreadMax;

            for (int i = 0; i < SpreadMax; i++)
            {
                if (!FDo[i])
                    continue;

                var input1 = FInput1[i];
                var input2 = FInput2[i];

                if (input1 == null || input2 == null)
                    continue;
                if (!input1.Allocated || !input2.Allocated)
                    continue;

                Matrix<byte> mask;
                var matcher = new BruteForceMatcher<float>(DistanceType.L2);
                matcher.Add(input2.Descriptors);

                var indices = new Matrix<int>(input1.Descriptors.Rows, 2);
                using (Matrix<float> distance = new Matrix<float>(input1.Descriptors.Rows, 2))
                {
                    matcher.KnnMatch(input1.Descriptors, indices, distance, 2, null);
                    mask = new Matrix<byte>(distance.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(distance, FUniqueness[i], mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(input2.KeyPoints, input1.KeyPoints, indices, mask, 1.5, 20);

                var positions1 = FOutPositions1[i];
                var positions2 = FOutPositions2[i];

                positions1.SliceCount = 0;
                positions2.SliceCount = 0;

                for (int j = 0; j < mask.Rows; j++)
                {
                    if (mask[j, 0] != 0)
                    {
                        var index2 = indices[j, 0];
                        var point1 = input1.KeyPoints[j];
                        var point2 = input2.KeyPoints[index2];

                        positions1.Add(new Vector2D(point1.Point.X, point1.Point.Y));
                        positions2.Add(new Vector2D(point2.Point.X, point2.Point.Y));
                    }
                }
            }
        }
Developer: kopffarben, Project: VVVV.Packs.Image, Lines of code: 55, Source file: MatchFeatures.cs

Example 3: Test

        public static void Test()
        {
            Matrix mA = new Matrix(2, 3);
            Matrix mB = new Matrix(3, 2);
            Matrix mC = new Matrix(2, 2);

            mA.SetValue(0, 0, 0.11);
            mA.SetValue(0, 1, 0.12);
            mA.SetValue(0, 2, 0.13);
            mA.SetValue(1, 0, 0.21);
            mA.SetValue(1, 1, 0.22);
            mA.SetValue(1, 2, 0.23);

            mB.SetValue(0, 0, 1011);
            mB.SetValue(0, 1, 1012);
            mB.SetValue(1, 0, 1021);
            mB.SetValue(1, 1, 1022);
            mB.SetValue(2, 0, 1031);
            mB.SetValue(2, 1, 1032);

            Blas.DGemm(Blas.TransposeType.NoTranspose, Blas.TransposeType.NoTranspose, 1.0, mA, mB, 0.0, ref mC);

            Console.WriteLine(mC.GetValue(0, 0) + " , " + mC.GetValue(0, 1));
            Console.WriteLine(mC.GetValue(1, 0) + " , " + mC.GetValue(1, 1));
        }
Developer: mastobaev, Project: gsldotnet, Lines of code: 25, Source file: BlasTester.cs

Example 4: Test2

        public static void Test2()
        {
            const uint MARGIN = 1;

            Matrix mA = new Matrix(2 + MARGIN, 3 + MARGIN);
            Matrix mB = new Matrix(3, 2);
            Matrix mC = new Matrix(2, 2);

            mA.SetValue(0 + MARGIN, 0 + MARGIN, 0.11);
            mA.SetValue(0 + MARGIN, 1 + MARGIN, 0.12);
            mA.SetValue(0 + MARGIN, 2 + MARGIN, 0.13);
            mA.SetValue(1 + MARGIN, 0 + MARGIN, 0.21);
            mA.SetValue(1 + MARGIN, 1 + MARGIN, 0.22);
            mA.SetValue(1 + MARGIN, 2 + MARGIN, 0.23);

            mB.SetValue(0, 0, 1011);
            mB.SetValue(0, 1, 1012);
            mB.SetValue(1, 0, 1021);
            mB.SetValue(1, 1, 1022);
            mB.SetValue(2, 0, 1031);
            mB.SetValue(2, 1, 1032);

            MatrixView mViewA = new MatrixView(mA, MARGIN, MARGIN, mA.Columns - MARGIN, mA.Rows - MARGIN);
            MatrixView mViewB = new MatrixView(mB, 0, 0, mB.Columns, mB.Rows);
            MatrixView mViewC = new MatrixView(mC, 0, 0, mC.Columns, mC.Rows);
            Blas.DGemm(Blas.TransposeType.NoTranspose, Blas.TransposeType.NoTranspose, 1.0, mViewA, mViewB, 0.0, ref mViewC);

            Console.WriteLine(mC.GetValue(0, 0) + " , " + mC.GetValue(0, 1));
            Console.WriteLine(mC.GetValue(1, 0) + " , " + mC.GetValue(1, 1));
        }
Developer: mastobaev, Project: gsldotnet, Lines of code: 30, Source file: BlasTester.cs

Example 5: Recognize

 public bool Recognize(Image<Gray, Byte> observedImage, out PointF[] Region)
 {
     // extract features from the observed image
     observedKeyPoints = new VectorOfKeyPoint();
     Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
     BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
     matcher.Add(modelDescriptors);
     indices = new Matrix<int>(observedDescriptors.Rows, k);
     using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
     {
         matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
         mask = new Matrix<byte>(dist.Rows, 1);
         mask.SetValue(255);
         Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
     }
     int nonZeroCount = CvInvoke.cvCountNonZero(mask);
     if (nonZeroCount >= requiredNonZeroCount)
     {
         nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, scaleIncrement, RotationBins);
         if (nonZeroCount >= requiredNonZeroCount)
             homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, ransacReprojThreshold);
     }
     bool ObjectFound;
     if (homography != null)
     {  //draw a rectangle along the projected model
         Rectangle rect = modelImage.ROI;
         Region = new PointF[] {
         new PointF(rect.Left, rect.Bottom),
         new PointF(rect.Right, rect.Bottom),
         new PointF(rect.Right, rect.Top),
         new PointF(rect.Left, rect.Top)};
         homography.ProjectPoints(Region);
         ObjectFound = true;
     }
     else
     {
         Region = null;
         ObjectFound = false;
     }
     return ObjectFound;
 }
Developer: JonHoy, Project: Robotic_Arm, Lines of code: 41, Source file: SurfRecognizer.cs

Example 6: DrawResult

        public Image<Bgr, byte> DrawResult(Image<Gray, byte> modelImage, Image<Gray, byte> observedImage,out double area, int minarea, out Point center)
        {
            //double estimated_dist =99999;
            center = new Point(400, 224);
            area = 0;
            //modelImage.Save("D:\\temp\\modelimage.jpg");
            //observedImage.Save("D:\\temp\\observedimage.jpg");

            // Homography matrix
            HomographyMatrix homography = null;

            // SURF feature detector
            var surfCpu = new SURFDetector(500, false);

            // Key points in the model image and the observed image

            Matrix<byte> mask;

            // k for the KNN match
            var k = 2;
            // Uniqueness threshold used for filtering matches
            var uniquenessThreshold = 0.8;

            // Extract SURF key points and descriptors from the model image
            var modelKeyPoints = surfCpu.DetectKeyPointsRaw(modelImage, null);
            var modelDescriptors = surfCpu.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

            // Extract SURF key points and descriptors from the observed image
            var observedKeyPoints = surfCpu.DetectKeyPointsRaw(observedImage, null);
            var observedDescriptors = surfCpu.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
            if (observedDescriptors == null)
            {

                return null;
            }

            // Match the feature vectors with a brute-force matcher
            //var bfmatcher = new BruteForceMatcher<float>(DistanceType.L2);
            //bfmatcher.Add(modelDescriptors);
            var indices = new Matrix<int>(observedDescriptors.Rows, k);
            var flannMatcher = new Index(modelDescriptors, 4);
            // Filter the match pairs by descriptor distance
            using (var dist = new Matrix<float>(observedDescriptors.Rows, k))
            {
                // 2-nearest-neighbour descriptor matching
                //bfmatcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                flannMatcher.KnnSearch(observedDescriptors, indices, dist, k, 24);
                // Successful matches are recorded in mask
                mask = new Matrix<byte>(dist.Rows, 1);
                mask.SetValue(255);
                // Apply the uniqueness threshold; only the surviving matches stay set in mask
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            var nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 10)
            {
                // Reject matches with inconsistent scale or rotation; survivors stay in mask
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices,
                    mask, 1.5, 20);
                if (nonZeroCount >= 10)
                    // Build the homography matrix from the remaining matches
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                        observedKeyPoints, indices, mask, 2);
            }

            // }

            // Draw the matched key points
            //Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,indices, new Bgr(0, 0, 255), new Bgr(0, 255, 0), mask, Features2DToolbox.KeypointDrawType.DEFAULT);
            // result.Save("D:\\temp\\matchedpoints.jpg");
            observedImage.ToBitmap();
            var result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                indices, new Bgr(0, 0, 255), new Bgr(0, 255, 0), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            #region draw the projected region on the Image

            // Draw the region projected by the homography
            if (homography != null)
            {
                var rect = modelImage.ROI;
                /*PointF[] pts = new PointF[] {
               new PointF(rect.Left, rect.Bottom),
               new PointF(rect.Right, rect.Bottom),
               new PointF(rect.Right, rect.Top),
               new PointF(rect.Left, rect.Top)
                    };*/
                var pts = new[]
                {
                    new PointF(rect.Left + (rect.Right - rect.Left)/5, rect.Bottom - (rect.Bottom - rect.Top)/5),
                    new PointF(rect.Right - (rect.Right - rect.Left)/5, rect.Bottom - (rect.Bottom - rect.Top)/5),
                    new PointF(rect.Right - (rect.Right - rect.Left)/5, rect.Top + (rect.Bottom - rect.Top)/5),
                    new PointF(rect.Left + (rect.Right - rect.Left)/5, rect.Top + (rect.Bottom - rect.Top)/5)
                };
                // Project the four model corners through the homography (accounting for rotation and deformation) and draw the quadrilateral
                homography.ProjectPoints(pts);
                area = Getarea(pts);
                double xsum = 0;
                double ysum = 0;
                foreach (var point in pts)
//......... the remainder of this method is omitted here .........
Developer: kylezhaoxc, Project: Kyle_Emgu, Lines of code: 101, Source file: SurfProcessor.cs

Example 7: testSIFT

        public bool testSIFT(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage)
        {
            bool isFound = false;
            HomographyMatrix homography = null;

            SIFTDetector siftCPU = new SIFTDetector();
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;

            Matrix<byte> mask;
            int k = 2;
            double uniquenessThreshold = 0.8;

            //extract features from the object image
            modelKeyPoints = siftCPU.DetectKeyPointsRaw(modelImage, null);
            Matrix<float> modelDescriptors = siftCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

            // extract features from the observed image
            observedKeyPoints = siftCPU.DetectKeyPointsRaw(observedImage, null);
            Matrix<float> observedDescriptors = siftCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
            BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
            matcher.Add(modelDescriptors);

            indices = new Matrix<int>(observedDescriptors.Rows, k);
            using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
            {
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                mask = new Matrix<byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
            }

            //Draw the matched keypoints
            Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
               indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            #region draw the projected region on the image
            if (homography != null)
            {  //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                   new PointF(rect.Left, rect.Bottom),
                   new PointF(rect.Right, rect.Bottom),
                   new PointF(rect.Right, rect.Top),
                   new PointF(rect.Left, rect.Top)};
                homography.ProjectPoints(pts);

                if (CvInvoke.cvCountNonZero(mask) >= 10)
                    isFound = true;

                result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.LightGreen), 5);
            }
            #endregion
            return isFound;
        }
Developer: gitter-badger, Project: LogoRecognizer, Lines of code: 64, Source file: Form2.cs

Example 8: MultiplyInternal

        private static Matrix MultiplyInternal(Vector3D left, Vector3D right)
        {
            var matrix = new Matrix(3, 3);
            matrix.SetValue(0, 0, left.X * right.X);
            matrix.SetValue(0, 1, left.X * right.Y);
            matrix.SetValue(0, 2, left.X * right.Z);

            matrix.SetValue(1, 0, left.Y * right.X);
            matrix.SetValue(1, 1, left.Y * right.Y);
            matrix.SetValue(1, 2, left.Y * right.Z);

            matrix.SetValue(2, 0, left.Z * right.X);
            matrix.SetValue(2, 1, left.Z * right.Y);
            matrix.SetValue(2, 2, left.Z * right.Z);

            return matrix;
        }
Developer: GTuritto, Project: ngenerics, Lines of code: 17, Source file: Vector3D.cs

Example 9: FindMatch

        public static void FindMatch(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix<int> indices, out Matrix<byte> mask, out HomographyMatrix homography)
        {
            int k = 2;
            double uniquenessThreshold = 0.8;
            SURFDetector surfCPU = new SURFDetector(500, false);
            Stopwatch watch;
            homography = null;

            //extract features from the object image
            modelKeyPoints = new VectorOfKeyPoint();
            Matrix<float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            observedKeyPoints = new VectorOfKeyPoint();
            Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
            BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
            matcher.Add(modelDescriptors);

            indices = new Matrix<int>(observedDescriptors.Rows, k);
            using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
            {
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                mask = new Matrix<byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
            }
            watch.Stop();

            matchTime = watch.ElapsedMilliseconds;
        }
Developer: RichardxLee, Project: RileeCapture, Lines of code: 40, Source file: SURFMatch.cs

Example 10: TestRTreesLetterRecognition

      public void TestRTreesLetterRecognition()
      {
         Matrix<float> data, response;
         ReadLetterRecognitionData(out data, out response);

         int trainingSampleCount = (int) (data.Rows * 0.8);

         Matrix<Byte> varType = new Matrix<byte>(data.Cols + 1, 1);
         varType.SetValue((byte) MlEnum.VarType.Numerical); //the data is numerical
         varType[data.Cols, 0] = (byte) MlEnum.VarType.Categorical; //the response is categorical

         Matrix<byte> sampleIdx = new Matrix<byte>(data.Rows, 1);
         using (Matrix<byte> sampleRows = sampleIdx.GetRows(0, trainingSampleCount, 1))
            sampleRows.SetValue(255);

         using (RTrees forest = new RTrees())
         using (TrainData td = new TrainData(data, MlEnum.DataLayoutType.RowSample, response, null, sampleIdx, null, varType))
         {
            forest.MaxDepth = 10;
            forest.MinSampleCount = 10;
            forest.RegressionAccuracy = 0.0f;
            forest.UseSurrogates = false;
            forest.MaxCategories = 15;
            forest.CalculateVarImportance = true;
            forest.ActiveVarCount = 4;
            forest.TermCriteria = new MCvTermCriteria(100, 0.01f);
            bool success = forest.Train(td);

            if (!success)
               return;
            
            double trainDataCorrectRatio = 0;
            double testDataCorrectRatio = 0;
            for (int i = 0; i < data.Rows; i++)
            {
               using (Matrix<float> sample = data.GetRow(i))
               {
                  double r = forest.Predict(sample, null);
                  r = Math.Abs(r - response[i, 0]);
                  if (r < 1.0e-5)
                  {
                     if (i < trainingSampleCount)
                        trainDataCorrectRatio++;
                     else
                        testDataCorrectRatio++;
                  }
               }
            }

            trainDataCorrectRatio /= trainingSampleCount;
            testDataCorrectRatio /= (data.Rows - trainingSampleCount);

            StringBuilder builder = new StringBuilder("Variable Importance: ");
            /*
            using (Matrix<float> varImportance = forest.VarImportance)
            {
               for (int i = 0; i < varImportance.Cols; i++)
               {
                  builder.AppendFormat("{0} ", varImportance[0, i]);
               }
            }*/

            EmguAssert.WriteLine(String.Format("Prediction accuracy for training data :{0}%", trainDataCorrectRatio * 100));
            EmguAssert.WriteLine(String.Format("Prediction accuracy for test data :{0}%", testDataCorrectRatio * 100));
            EmguAssert.WriteLine(builder.ToString());
         }
      }
Developer: Delaley, Project: emgucv, Lines of code: 67, Source file: AutoTestML.cs

Example 11: DrawBruteForceMatch

        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImageFileName">The model image</param>
        /// <param name="observedImageBitmap">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        private System.Drawing.Point[] DrawBruteForceMatch(String modelImageFileName, Bitmap observedImageBitmap, out long matchTime)
            {
                try
                {
                    Image<Gray, Byte> modelImage = new Image<Gray, byte>(modelImageFileName);
                    Image<Gray, Byte> observedImage = new Image<Gray, byte>(observedImageBitmap);
                    HomographyMatrix homography = null;
                    Stopwatch watch;
                    SURFDetector surfCPU = new SURFDetector(500, false);
                    VectorOfKeyPoint modelKeyPoints;
                    VectorOfKeyPoint observedKeyPoints;
                    Matrix<int> indices;

                    Matrix<byte> mask;
                    int k = 2;
                    double uniquenessThreshold = 0.8;

                    //extract features from the object image
                    modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
                    Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
                    Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
                    BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
                    matcher.Add(modelDescriptors);

                    indices = new Matrix<int>(observedDescriptors.Rows, k);
                    Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k);
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);

                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                    }
                    watch.Stop();

                    //Draw the matched keypoints
                    Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                                            indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

                    System.Drawing.Point[] newpts = null;
                    #region draw the projected region on the image
                    if (homography != null)
                    {
                        //draw a rectangle along the projected model
                        Rectangle rect = modelImage.ROI;
                        PointF[] pts = new PointF[] { 
                                                               new PointF(rect.Left, rect.Bottom),
                                                               new PointF(rect.Right, rect.Bottom),
                                                               new PointF(rect.Right, rect.Top),
                                                               new PointF(rect.Left, rect.Top)};
                        homography.ProjectPoints(pts);
                        //result.DrawPolyline(Array.ConvertAll<PointF, System.Drawing.Point>(pts, System.Drawing.Point.Round), true, new Bgr(Color.Red), 2);
                        //result.Save(@"E:\1.jpg");
                        newpts = Array.ConvertAll<PointF, System.Drawing.Point>(pts, System.Drawing.Point.Round);

                    }
                    #endregion
                    matchTime = watch.ElapsedMilliseconds;
                    return newpts;
                }
                catch (Exception e)
                {
                    Console.WriteLine(e.Message);
                    matchTime = 0;
                    return new System.Drawing.Point[] { new System.Drawing.Point(-1, -1), new System.Drawing.Point(-1, -1), new System.Drawing.Point(-1, -1), new System.Drawing.Point(-1, -1) };
                }
        }
Developer: shonwang, Project: .Net-UIAutomation-Encapsulate-Again, Lines of code: 84, Source file: UIAutomation.cs

Example 12: Decompose

        /// <summary>
        /// Decomposes the specified matrix, using a QR decomposition.
        /// </summary>
        /// <param name="matrix">The matrix to decompose.</param>
        public void Decompose(Matrix matrix)
        {
            qr = matrix.Clone();
            diagonal = new double[qr.Columns];

            // Main loop.
            for (var k = 0; k < qr.Columns; k++)
            {
                // Compute 2-norm of k-th column without under/overflow.
                double nrm = 0;

                for (var i = k; i < qr.Rows; i++)
                {
                    nrm = MathAlgorithms.Hypotenuse(nrm, qr[i, k]);
                }

                if (nrm != 0.0)
                {
                    // Form k-th Householder vector.
                    if (qr.GetValue(k, k) < 0)
                    {
                        nrm = -nrm;
                    }
                    for (var i = k; i < qr.Rows; i++)
                    {
                        qr.SetValue(i, k, qr.GetValue(i, k) / nrm);
                    }

                    qr.SetValue(k, k, qr.GetValue(k, k) + 1.0);

                    // Apply transformation to remaining columns.
                    for (var j = k + 1; j < qr.Columns; j++)
                    {
                        var s = 0.0;

                        for (var i = k; i < qr.Rows; i++)
                        {
                            s += qr.GetValue(i, k) * qr.GetValue(i, j);
                        }

                        s = (-s) / qr.GetValue(k, k);

                        for (var i = k; i < qr.Rows; i++)
                        {
                            qr.SetValue(i, j, qr.GetValue(i, j) + (s * qr.GetValue(i, k)));
                        }
                    }
                }
                diagonal[k] = -nrm;
            }
        }
Developer: GTuritto, Project: ngenerics, Lines of code: 57, Source file: QRDecomposition.cs

Example 13: Decompose

        /// <summary>
        /// Decomposes the specified matrix using a LU decomposition.
        /// </summary>
        /// <param name="matrix">The matrix to decompose.</param>
        public void Decompose(Matrix matrix)
        {
            LU = matrix.Clone();

            pivots = new int[LU.Rows];

            for (var i = 0; i < LU.Rows; i++)
            {
                pivots[i] = i;
            }

            pivotSign = 1;

            var column = new double[LU.Rows];

            for (var j = 0; j < LU.Columns; j++)
            {
                for (var i = 0; i < LU.Rows; i++)
                {
                    column[i] = LU.GetValue(i, j);
                }

                // Apply previous transformations.
                for (var i = 0; i < LU.Rows; i++)
                {
                    // Most of the time is spent in the following dot product.
                    var kmax = Math.Min(i, j);
                    var s = 0.0;

                    for (var k = 0; k < kmax; k++)
                    {
                        s += LU.GetValue(i, k) * column[k];
                    }

                    LU.SetValue(i, j, column[i] - s);
                    column[i] -= s;
                }

                // Find pivot and exchange if necessary.
                var p = j;

                for (var i = j + 1; i < LU.Rows; i++)
                {
                    if (Math.Abs(column[i]) > Math.Abs(column[p]))
                    {
                        p = i;
                    }
                }

                if (p != j)
                {
                    for (var k = 0; k < LU.Columns; k++)
                    {
                        var t = LU[p, k];
                        LU.SetValue(p, k, LU[j, k]);
                        LU.SetValue(j, k, t);
                    }

                    Swapper.Swap(pivots, p, j);

                    pivotSign = -pivotSign;
                }

                // Compute multipliers.
                if ((j < LU.Rows) && (LU.GetValue(j, j) != 0.0))
                {
                    for (var i = j + 1; i < LU.Rows; i++)
                    {
                        LU.SetValue(i, j, LU.GetValue(i, j) / LU.GetValue(j, j));
                    }
                }
            }
        }
Developer: GTuritto, Project: ngenerics, Lines of code: 77, Source file: LUDecomposition.cs

Example 14: Detect

        public static Boolean Detect(ObjectDetectee observedScene, ObjectDetectee obj)
        {
            HomographyMatrix homography = null;

            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;

            Matrix<byte> mask;
            int k = 2;
            double uniquenessThreshold = 0.8;
            int testsPassed = 0;

            // extract features from the observed image
            observedKeyPoints = observedScene.objectKeyPoints;
            Matrix<float> observedDescriptors = observedScene.objectDescriptors;
            BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
            matcher.Add(obj.objectDescriptors);
            if (observedDescriptors == null)
            {
                return false;
            }
            indices = new Matrix<int>(observedDescriptors.Rows, k);
            using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
            {
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                mask = new Matrix<byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            int nonZero = 0;
            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(obj.objectKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(obj.objectKeyPoints, observedKeyPoints, indices, mask, 2);
                    for (int i = 0; i < mask.Height; i++)
                    {
                        for (int j = 0; j < mask.Width; j++)
                        {
                            if (mask[i, j] != 0)
                            {
                                nonZero++;
                            }
                        }
                    }
                    if (nonZero > 4)
                    {
                        testsPassed++;
                    }
                }

            }

            if (homography != null)
            {
                //draw a rectangle along the projected model
                Rectangle rect = obj.objectImage.ROI;
                PointF[] pts = new PointF[] {
               new PointF(rect.Left, rect.Bottom),
               new PointF(rect.Right, rect.Bottom),
               new PointF(rect.Right, rect.Top),
               new PointF(rect.Left, rect.Top)};

                using (MemStorage m1 = new MemStorage())
                using (MemStorage m2 = new MemStorage())
                {

                    Contour<PointF> objPoly = new Contour<PointF>(m1);
                    Contour<PointF> scenePoly = new Contour<PointF>(m2);
                    // OrderBy returns a new sequence, so the result must be assigned back for the sort to take effect.
                    pts = pts.OrderBy(p => p.X).ThenBy(p => p.Y).ToArray();
                    foreach (PointF i in pts)
                    {
                        objPoly.Push(i);
                    }
                    homography.ProjectPoints(pts);
                    pts = pts.OrderBy(p => p.X).ThenBy(p => p.Y).ToArray();
                    foreach (PointF i in pts)
                    {
                        scenePoly.Push(i);
                    }
                    double shapeMatch = CvInvoke.cvMatchShapes(objPoly, scenePoly, Emgu.CV.CvEnum.CONTOURS_MATCH_TYPE.CV_CONTOURS_MATCH_I3, 0);
                    double ratio = scenePoly.Area / objPoly.Area;
                    foreach (PointF i in pts)
                    {
                        if (i.X < 0 || i.Y < 0)
                        {
                            return false;
                        }
                    }
                    if (shapeMatch != 0 && shapeMatch <= 2)
                    {
                        testsPassed++;
                    }
                    if (ratio > 0.001 && ratio < 5.25)
                    {
                        testsPassed++;
                    }
//......... the remainder of this method is omitted here .........
Developer: NeoValkyrion, Project: ubiquidine, Lines of code: 101, Source file: ObjectMatcher.cs

Example 15: TestCompare

      public void TestCompare()
      {
         Matrix<float> f1 = new Matrix<float>(1, 380);
         f1.SetValue(0.8);
         Matrix<float> f2 = new Matrix<float>(f1.Size);
         f2.SetValue(1.0);
         Matrix<byte> mask1 = new Matrix<byte>(f1.Size);
         CvInvoke.Compare(f1, f2, mask1, CvEnum.CmpType.LessEqual);
         int total1 = CvInvoke.CountNonZero(mask1);

         EmguAssert.IsTrue(total1 == f1.Width * f1.Height);

         Matrix<Byte> mask2 = new Matrix<byte>(f1.Size);
         using (ScalarArray ia = new ScalarArray(1.0))
         {
            CvInvoke.Compare(f1, ia, mask2, CvEnum.CmpType.LessEqual);
            int total2 = CvInvoke.CountNonZero(mask2);
            EmguAssert.IsTrue(total1 == total2);
         }
      }
Developer: neutmute, Project: emgucv, Lines of code: 20, Source file: AutoTestImage.cs


Note: The System.Matrix.SetValue method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; the copyright of the source code belongs to the original authors, and distribution and use should follow the license of the corresponding project. Do not reproduce this article without permission.