

C# Matrix.SubMatrix Method Code Examples

This article collects typical usage examples of the System.Matrix.SubMatrix method in C#. If you are struggling with questions such as: What exactly does C# Matrix.SubMatrix do? How is Matrix.SubMatrix called? What does Matrix.SubMatrix look like in real code? — then the hand-picked examples below may help. You can also explore further usage examples of System.Matrix, the class this method belongs to.


A total of 8 code examples of the Matrix.SubMatrix method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better C# code examples.
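
Before the full examples, here is a minimal sketch of the call itself. The examples on this page appear to use the Matrix&lt;T&gt; type from the MathNet.Numerics library (rather than a literal System.Matrix type), whose SubMatrix(rowIndex, rowCount, columnIndex, columnCount) method copies a rectangular block out of a matrix into a new matrix. The matrix values in this sketch are illustrative only.

        using MathNet.Numerics.LinearAlgebra;
        using MathNet.Numerics.LinearAlgebra.Double;

        // A small 3x4 matrix to slice from.
        Matrix<double> m = DenseMatrix.OfArray(new double[,]
        {
            { 1,  2,  3,  4 },
            { 5,  6,  7,  8 },
            { 9, 10, 11, 12 }
        });

        // SubMatrix(rowIndex, rowCount, columnIndex, columnCount):
        // copy the 2x2 block starting at row 1, column 1, i.e. [[6, 7], [10, 11]].
        Matrix<double> block = m.SubMatrix(1, 2, 1, 2);

        // The result is a copy, so modifying it leaves m unchanged.
        block[0, 0] = 0.0;

A pattern that recurs in the examples below is x.SubMatrix(half, x.RowCount - half, 0, x.ColumnCount), which selects all columns of the rows from index half to the end.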

Example 1: MakePrediction

        /// <summary>
        /// Make some classification predictions on a toy dataset using an SVC.
        ///
        /// If binary is true, restrict to a binary classification problem instead of a
        /// multiclass classification problem.
        /// </summary>
        /// <param name="x">Feature matrix; the iris dataset is loaded when both x and y are null.</param>
        /// <param name="y">Target labels; the iris dataset is loaded when both x and y are null.</param>
        /// <param name="binary">If true, restrict the task to the two classes with label less than 2.</param>
        private static Tuple<int[], int[], Matrix<double>> MakePrediction(
            Matrix<double> x = null,
            int[] y = null,
            bool binary = false)
        {
            if (x == null && y == null)
            {
                // import some data to play with
                var dataset = IrisDataset.Load();
                x = dataset.Data;
                y = dataset.Target;
            }

            if (binary)
            {
                // restrict to a binary classification task
                x = x.RowsAt(y.Indices(v => v < 2));
                y = y.Where(v => v < 2).ToArray();
            }

            int nSamples = x.RowCount;
            int nFeatures = x.ColumnCount;
            var rng = new Random(37);
            int[] p = Shuffle(rng, Enumerable.Range(0, nSamples).ToArray());
            x = x.RowsAt(p);
            y = y.ElementsAt(p);
            var half = nSamples/2;

            // add noisy features to make the problem harder and avoid perfect results
            rng = new Random(0);
            x = x.HStack(DenseMatrix.CreateRandom(nSamples, 200, new Normal{RandomSource = rng}));

            // run classifier, get class probabilities and label predictions
            var clf = new Svc<int>(kernel: Kernel.Linear, probability: true);
            clf.Fit(x.SubMatrix(0, half, 0, x.ColumnCount), y.Take(half).ToArray());
            Matrix<double> probasPred = clf.PredictProba(x.SubMatrix(half, x.RowCount - half, 0, x.ColumnCount));

            if (binary)
            {
                // only interested in probabilities of the positive case
                // XXX: do we really want a special API for the binary case?
                probasPred = probasPred.SubMatrix(0, probasPred.RowCount, 1, 1);
            }

            var yPred = clf.Predict(x.SubMatrix(half, x.RowCount - half, 0, x.ColumnCount));
            var yTrue = y.Skip(half).ToArray();
            return Tuple.Create(yTrue, yPred, probasPred);
        }
Developer: geoparsYoti, Project: Sharpkit.Learn, Lines of code: 57, Source file: MetricsTest.cs

Example 2: OneVsOneCoef

        /// <summary>
        /// Generate primal coefficients from dual coefficients
        /// for the one-vs-one multi class LibSVM in the case
        /// of a linear kernel.
        /// </summary>
        private static Vector<double>[] OneVsOneCoef(
            Matrix<double> dualCoef,
            int[] nSupport,
            Matrix<double> supportVectors)
        {
            // get 1vs1 weights for all n*(n-1) classifiers.
            // this is somewhat messy.
            // shape of dual_coef_ is nSV * (n_classes -1)
            // see docs for details
            int nClass = dualCoef.RowCount + 1;

            // XXX we could do preallocation of coef but
            // would have to take care in the sparse case
            var coef = new List<Vector<double>>();
            var svLocs = CumSum(new[] { 0 }.Concat(nSupport).ToArray());
            for (int class1 = 0; class1 < nClass; class1++)
            {
                // SVs for class1:
                var sv1 = supportVectors.SubMatrix(
                    svLocs[class1],
                    svLocs[class1 + 1] - svLocs[class1],
                    0,
                    supportVectors.ColumnCount);

                for (int class2 = class1 + 1; class2 < nClass; class2++)
                {
                    // SVs for class2:
                    var sv2 = supportVectors.SubMatrix(
                        svLocs[class2],
                        svLocs[class2 + 1] - svLocs[class2],
                        0,
                        supportVectors.ColumnCount);

                    // dual coef for class1 SVs:
                    var alpha1 = dualCoef.Row(class2 - 1).SubVector(
                        svLocs[class1],
                        svLocs[class1 + 1] - svLocs[class1]);

                    // dual coef for class2 SVs:
                    var alpha2 = dualCoef.Row(class1).SubVector(
                        svLocs[class2],
                        svLocs[class2 + 1] - svLocs[class2]);

                    // build weight for class1 vs class2
                    coef.Add((alpha1 * sv1) + (alpha2 * sv2));
                }
            }

            return coef.ToArray();
        }
Developer: geoparsYoti, Project: Sharpkit.Learn, Lines of code: 55, Source file: LibSvmBase.cs

Example 3: UpdateCameraMatrix

        void UpdateCameraMatrix(Matrix<double> camera, CameraIndex index)
        {
            var RQ = camera.SubMatrix(0, 3, 0, 3).QR();

            var calib = RQ.R;
            if(Math.Abs(calib[2, 2] - 1) > 1e-6)
            {
                double scale = calib[2, 2];
                camera.MultiplyThis(scale);
                // NotifyPropertyChanged("CameraLeft");
                RQ = camera.SubMatrix(0, 3, 0, 3).QR();
            }
            calib = RQ.R;
            var rot = RQ.Q;

            // If fx < 0, set fx = -fx and [r11,r12,r13] = -[r11,r12,r13].
            // The first row of the rotation matrix is multiplied only by fx, so flipping the sign of both
            // fx and that row leaves M = K*R, and therefore the camera matrix, unchanged (same for fy, but then the skew sign must be flipped as well).
            if(calib[0, 0] < 0)
            {
                calib[0, 0] = -calib[0, 0];
                rot[0, 0] = -rot[0, 0];
                rot[0, 1] = -rot[0, 1];
                rot[0, 2] = -rot[0, 2];
            }
            if(calib[1, 1] < 0)
            {
                calib[1, 1] = -calib[1, 1];
                calib[0, 1] = -calib[0, 1];
                rot[1, 0] = -rot[1, 0];
                rot[1, 1] = -rot[1, 1];
                rot[1, 2] = -rot[1, 2];
            }

            var trans = -camera.SubMatrix(0, 3, 0, 3).Inverse().Multiply(camera.Column(3));

            SetCalibrationMatrix(index, calib);
            SetRotationMatrix(index, rot);
            SetTranslationVector(index, trans);

            ComputeEssentialFundamental();
        }
Developer: KFlaga, Project: Cam3D, Lines of code: 42, Source file: CalibrationData.cs

Example 4: Diff

 /// <summary>
 /// Calculates the difference on the magnitude spectrogram
 /// </summary>
 /// <param name="spec">the magnitude spectrogram</param>
 /// <param name="pos">only keep positive values</param>
 /// <param name="diffFrames">calculate the difference to the N-th previous frame</param>
 public Matrix<float> Diff(Matrix<float> spec, bool pos=false, int diffFrames=0)
 {
     var diff = _allocator.GetFloatMatrix(spec.RowCount, spec.ColumnCount);
     //var diff = Matrix<float>.Build.SameAs(spec);
     if (diffFrames == 0) diffFrames = _diffFrames;
     //calculate the diff
     var subMatrix = spec.SubMatrix(diffFrames, spec.RowCount - diffFrames, 0, spec.ColumnCount)
         .Subtract(spec.SubMatrix(0, spec.RowCount - diffFrames, 0, spec.ColumnCount));
     diff.SetSubMatrix(diffFrames, 0, subMatrix);
     if (pos)
         // zero out negative differences so that, as documented, only positive values are kept
         diff = diff.PointwiseMultiply(diff.Map(f => (float)((f > 0) ? 1 : 0)));
     return diff;
 }
Developer: opcon, Project: onset-detection, Lines of code: 18, Source file: SpectralODF.cs

Example 5: localizeLink

        //This is only used by the Part Exporter, but it localizes the link to the Origin_global coordinate system
        public void localizeLink(link Link, Matrix<double> GlobalTransform)
        {
            Matrix<double> GlobalTransformInverse = GlobalTransform.Inverse();
            Matrix<double> linkCoMTransform = ops.getTranslation(Link.Inertial.Origin.xyz);
            Matrix<double> localLinkCoMTransform = GlobalTransformInverse * linkCoMTransform;

            Matrix<double> linkVisualTransform = ops.getTransformation(Link.Visual.Origin.xyz, Link.Visual.Origin.rpy);
            Matrix<double> localVisualTransform = GlobalTransformInverse * linkVisualTransform;

            Matrix<double> linkCollisionTransform = ops.getTransformation(Link.Collision.Origin.xyz, Link.Collision.Origin.rpy);
            Matrix<double> localCollisionTransform = GlobalTransformInverse * linkCollisionTransform;

            // The linear array in Link.Inertial.Inertia.Moment is in row-major order, but this matrix constructor uses column-major order.
            // The moment matrix is symmetric, so this shouldn't matter. If it does, just transpose linkGlobalMomentInertia.
            // These three matrices are 3x3, as opposed to the 4x4 transformation matrices above. You're welcome for the confusion.
            Matrix<double> linkGlobalMomentInertia = new DenseMatrix(3, 3, Link.Inertial.Inertia.Moment);
            Matrix<double> GlobalRotMat = GlobalTransform.SubMatrix(0, 3, 0, 3);
            Matrix<double> linkLocalMomentInertia = GlobalRotMat.Inverse() * linkGlobalMomentInertia;

            Link.Inertial.Origin.xyz = ops.getXYZ(localLinkCoMTransform);
            Link.Inertial.Origin.rpy = new double[] { 0, 0, 0 };

            // Wait, are you saying that even though the matrix was transposed from column-major order, you are writing it out in row-major order here?
            // Yes, yes I am.
            double[] moment = linkLocalMomentInertia.ToRowWiseArray();
            Link.Inertial.Inertia.setMomentMatrix(moment);


            Link.Collision.Origin.xyz = ops.getXYZ(localCollisionTransform);
            Link.Collision.Origin.rpy = ops.getRPY(localCollisionTransform);

            Link.Visual.Origin.xyz = ops.getXYZ(localVisualTransform);
            Link.Visual.Origin.rpy = ops.getRPY(localVisualTransform);
        }
Developer: chlai, Project: sw2urdf, Lines of code: 35, Source file: URDFExporterExtension.cs

Example 6: backwardPass

        /// <summary>
        /// Backward pass, used for backprop.
        /// Propagates the input from the forward pass back into the network.
        /// </summary>
        /// <param name="pat">input pattern</param>
        /// <param name="targets">targets for the input</param>
        private void backwardPass(Matrix<float> pat, Matrix<float> targets)
        {
            /*
                % backward pass, MATLAB code
                delta_o = (out - targets) .* ((1 + out) .* (1 - out)) * 0.5;
                delta_h = (v' * delta_o) .* ((1 + hout) .* (1 - hout)) * 0.5;
                delta_h = delta_h(1:hidden, :);
             */
            Matrix<float> r1 = (net_out.Add(new DenseMatrix(net_out.RowCount, net_out.ColumnCount, 1.0f)))
                .PointwiseMultiply(new DenseMatrix(net_out.RowCount, net_out.ColumnCount, 1.0f).Subtract(net_out));
            net_deltao = (net_out - targets).PointwiseMultiply(r1).Multiply(0.5f);

            Matrix<float> r2 = (net_hout.Add(new DenseMatrix(net_hout.RowCount, net_hout.ColumnCount, 1.0f)))
                .PointwiseMultiply(new DenseMatrix(net_hout.RowCount, net_hout.ColumnCount, 1.0f).Subtract(net_hout));
            net_deltah = (weights2.Transpose().Multiply(net_deltao)).PointwiseMultiply(r2).Multiply(0.5f);

            net_deltah = net_deltah.SubMatrix(0, HiddenNodes, 0, pat.ColumnCount);
        }
Developer: felix11, Project: MSP-Workshops, Lines of code: 21, Source file: MultilayerNetwork.cs

Example 7: Test_EstimateCameraMatrix_NoisedLinear

        public void Test_EstimateCameraMatrix_NoisedLinear()
        {
            PrepareCameraMatrix();
            var points = GenerateCalibrationPoints_Random();
            PrepareCalibrator(
                AddNoise(points, _varianceReal, _varianceImage));

            _calib.HomoPoints();

            _calib.NormalizeImagePoints();
            _calib.NormalizeRealPoints();

            _calib.CameraMatrix = _calib.FindLinearEstimationOfCameraMatrix();

            _calib.DenormaliseCameraMatrix();
            _calib.DecomposeCameraMatrix();
            _eCM = _calib.CameraMatrix;

            double scaleK = 1.0 / _calib.CameraInternalMatrix[2, 2];
            _eCM.MultiplyThis(-scaleK);

            var eK = _calib.CameraInternalMatrix.Multiply(scaleK);
            var eR = -_calib.CameraRotationMatrix;
            var eC = -(_eCM.SubMatrix(0, 3, 0, 3).Inverse() * _eCM.Column(3));

            Matrix<double> eExt = new DenseMatrix(3, 4);
            eExt.SetSubMatrix(0, 0, eR);
            eExt.SetColumn(3, -eR * eC);

            var eCM = eK * eExt;

            var errVec = _CM.PointwiseDivide_NoNaN(_eCM);
            double err = errVec.L2Norm();
            Assert.IsTrue(
                Math.Abs(err - Math.Sqrt(12)) < Math.Sqrt(12) / 50.0 || // max 2% difference
                (_eCM - _CM).FrobeniusNorm() < 1e-3);

            for(int p = 0; p < _pointsCount; ++p)
            {
                var cp = points[p];
                Vector<double> rp = new DenseVector(4);
                rp[0] = cp.RealX;
                rp[1] = cp.RealY;
                rp[2] = cp.RealZ;
                rp[3] = 1.0;
                var imagePoint = _eCM * rp;

                Vector2 ip = new Vector2(imagePoint[0] / imagePoint[2], imagePoint[1] / imagePoint[2]);
                Assert.IsTrue((ip - cp.Img).Length() < cp.Img.Length() / 10.0);
            }
        }
Developer: KFlaga, Project: Cam3D, Lines of code: 51, Source file: CameraMatrixComputationTests.cs

Example 8: Test_EstimateCameraMatrix_Minimised

        public void Test_EstimateCameraMatrix_Minimised()
        {
            PrepareCameraMatrix();
            var points = GenerateCalibrationPoints_Random(100);
            var noisedPoints = AddNoise(points, _varianceReal, _varianceImage, 200);
            PrepareCalibrator(noisedPoints);

            _calib.HomoPoints();
            _calib.NormalizeImagePoints();
            _calib.NormalizeRealPoints();
            _calib._pointsNormalised = true;
            _calib.CameraMatrix = _calib.FindLinearEstimationOfCameraMatrix();
            //    _calib.FindNormalisedVariances();
            _calib.DenormaliseCameraMatrix();
            _calib._pointsNormalised = false;

            _eCM = _calib.CameraMatrix;
            double totalDiff = 0.0;
            for(int p = 0; p < _pointsCount; ++p)
            {
                var cp = points[p];
                Vector<double> rp = new DenseVector(4);
                rp[0] = cp.RealX;
                rp[1] = cp.RealY;
                rp[2] = cp.RealZ;
                rp[3] = 1.0;
                var imagePoint = _eCM * rp;

                Vector2 ip = new Vector2(imagePoint[0] / imagePoint[2], imagePoint[1] / imagePoint[2]);
                totalDiff += (ip - cp.Img).Length();
                Assert.IsTrue((ip - cp.Img).Length() < 1.0,
                    "Point after linear estimation too far : " + (ip - cp.Img).Length());
            }

            _calib.HomoPoints();
            _calib.NormalizeImagePoints();
            _calib.NormalizeRealPoints();
            _calib.CameraMatrix = _calib.FindLinearEstimationOfCameraMatrix();
            _calib._pointsNormalised = true;
            _calib.FindNormalisedVariances();
            _calib.UseCovarianceMatrix = true;
            var lecm = _eCM.Clone();

            // Disturb camera matrix a little
              //          _calib.CameraMatrix = AddNoise(_calib.CameraMatrix);

            _calib._miniAlg.DoComputeJacobianNumerically = true;
            _calib._miniAlg.NumericalDerivativeStep = 1e-4;
            _calib.MinimizeError();

            // _calib.DenormaliseCameraMatrix();
            _calib.DecomposeCameraMatrix();
            _eCM = _calib.CameraMatrix;

            var errVec = _eCM.PointwiseDivide_NoNaN(lecm);
            double err = errVec.L2Norm();

            double scaleK = 1.0 / _calib.CameraInternalMatrix[2, 2];
            _eCM.MultiplyThis(-scaleK);

            var eK = _calib.CameraInternalMatrix.Multiply(scaleK);
            var eR = -_calib.CameraRotationMatrix;
            var eC = -(_eCM.SubMatrix(0, 3, 0, 3).Inverse() * _eCM.Column(3));

            Matrix<double> eExt = new DenseMatrix(3, 4);
            eExt.SetSubMatrix(0, 0, eR);
            eExt.SetColumn(3, -eR * eC);

            var eCM = eK * eExt;

            // var errVec = _CM.PointwiseDivide_NoNaN(_eCM);
            // double err = errVec.L2Norm();
            // Assert.IsTrue(
            //    Math.Abs(err - Math.Sqrt(12)) < Math.Sqrt(12) / 1000.0 || // max 0.1% difference
            //    (_eCM - _CM).FrobeniusNorm() < 1e-3);

            double estDiff = 0;
            for(int p = 0; p < _pointsCount; ++p)
            {
                var cp = points[p];
                Vector<double> rp = new DenseVector(4);
                rp[0] = cp.RealX;
                rp[1] = cp.RealY;
                rp[2] = cp.RealZ;
                rp[3] = 1.0;
                var imagePoint = _eCM * rp;

                Vector2 ip = new Vector2(imagePoint[0] / imagePoint[2], imagePoint[1] / imagePoint[2]);
                estDiff += (ip - cp.Img).Length();
                Assert.IsTrue((ip - cp.Img).Length() < 1.5,
                        "Point after error minimisation too far: " + (ip - cp.Img).Length());
            }

            var minialg = _calib._miniAlg;
            // Test convergence:
            // ||mX-rX|| = ||mX-eX|| + ||rX-eX|| (or squared??)
            // rX - real point from 'points'
            // mX - measured point, noised
            // eX = estimated X from result vector for 3d points and ePeX for image point
            double len2_mr = 0;
//......... (part of the code is omitted here) .........
Developer: KFlaga, Project: Cam3D, Lines of code: 101, Source file: CameraMatrixComputationTests.cs


Note: The System.Matrix.SubMatrix method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code. Do not repost without permission.