

C# Size.GetValueOrDefault Method Code Examples

This article collects typical usage examples of the C# Size.GetValueOrDefault method (strictly speaking, Nullable&lt;Size&gt;.GetValueOrDefault called on a Size? value). If you are wondering what Size.GetValueOrDefault does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples of the Size type involved in these calls.


The sections below present 15 code examples of the Size.GetValueOrDefault method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.

Example 1: DetectMultiScale

        /// <summary>
        /// Performs object detection with a multi-scale window.
        /// </summary>
        /// <param name="img">Source image. CV_8UC1 and CV_8UC4 types are supported for now.</param>
        /// <param name="foundWeights"></param>
        /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Mock parameter to keep the CPU interface compatibility. It must be (0,0).</param>
        /// <param name="scale">Coefficient of the detection window increase.</param>
        /// <param name="groupThreshold">Coefficient to regulate the similarity threshold. 
        /// When detected, some objects can be covered by many rectangles. 0 means not to perform grouping.</param>
        /// <returns>Detected objects boundaries.</returns>
        public virtual Rect[] DetectMultiScale(Mat img, out double[] foundWeights,
            double hitThreshold = 0, Size? winStride = null, Size? padding = null, double scale = 1.05, int groupThreshold = 2)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfRect())
            using (var foundWeightsVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectMultiScale(ptr, img.CvPtr, flVec.CvPtr, foundWeightsVec.CvPtr,
                    hitThreshold, winStride0, padding0, scale, groupThreshold);
                foundWeights = foundWeightsVec.ToArray();
                return flVec.ToArray();
            }
        }
Developer: kaorun55, Project: opencvsharp, Lines: 32, Source: HOGDescriptor.cs
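
As a quick orientation, here is a minimal usage sketch for this overload (not taken from the project's own samples). It assumes this OpenCvSharp build exposes HOGDescriptor.GetDefaultPeopleDetector and SetSVMDetector, and uses a blank Mat as a stand-in for a real grayscale input image:

        // Usage sketch; the blank Mat stands in for a real CV_8UC1 image loaded from disk.
        using (var img = new Mat(480, 640, MatType.CV_8UC1, Scalar.All(0)))
        using (var hog = new HOGDescriptor())
        {
            // Assumption: the default people detector helper is available in this version.
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            double[] foundWeights;
            Rect[] found = hog.DetectMultiScale(img, out foundWeights, winStride: new Size(8, 8));
            Console.WriteLine("detections: {0}", found.Length);
        }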

Example 2: Compute

        /// <summary>
        /// Computes HOG descriptors of the given image.
        /// </summary>
        /// <param name="img">Source image.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Padding of the image.</param>
        /// <param name="locations">Locations of the detection windows to evaluate.</param>
        /// <returns>The computed HOG descriptor values.</returns>
        public virtual float[] Compute(Mat img, Size? winStride = null, Size? padding = null, Point[] locations = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfFloat())
            {
                int length = (locations != null) ? locations.Length : 0;
                NativeMethods.objdetect_HOGDescriptor_compute(ptr, img.CvPtr, flVec.CvPtr, winStride0, padding0, locations, length);
                return flVec.ToArray();
            }
        }
Developer: kaorun55, Project: opencvsharp, Lines: 24, Source: HOGDescriptor.cs
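
A minimal sketch of calling Compute. With the default HOG geometry the 64x128 window yields a 3780-element descriptor; the blank Mat below is only a stand-in for a real image patch:

        // Sketch: rows=128, cols=64, i.e. exactly one default detection window.
        using (var patch = new Mat(128, 64, MatType.CV_8UC1, Scalar.All(0)))
        using (var hog = new HOGDescriptor())
        {
            float[] descriptor = hog.Compute(patch);
            Console.WriteLine(descriptor.Length);  // 3780 = 105 blocks * 4 cells * 9 bins for the defaults
        }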

Example 3: Detect

        /// <summary>
        /// Performs object detection without a multi-scale window.
        /// </summary>
        /// <param name="img">Source image. CV_8UC1 and CV_8UC4 types are supported for now.</param>
        /// <param name="weights"></param>
        /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane. 
        /// Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient). 
        /// But if the free coefficient is omitted (which is allowed), you can specify it manually here.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Mock parameter to keep the CPU interface compatibility. It must be (0,0).</param>
        /// <param name="searchLocations"></param>
        /// <returns>Left-top corner points of detected objects boundaries.</returns>
        public virtual Point[] Detect(Mat img, out double[] weights, 
            double hitThreshold = 0, Size? winStride = null, Size? padding = null, Point[] searchLocations = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfPoint())
            using (var weightsVec = new VectorOfDouble())
            {
                int slLength = (searchLocations != null) ? searchLocations.Length : 0;
                NativeMethods.objdetect_HOGDescriptor_detect(ptr, img.CvPtr, flVec.CvPtr, weightsVec.CvPtr,
                    hitThreshold, winStride0, padding0, searchLocations, slLength);
                weights = weightsVec.ToArray();
                return flVec.ToArray();
            }
        }
Developer: kaorun55, Project: opencvsharp, Lines: 33, Source: HOGDescriptor.cs

Example 4: Create

 /// <summary>
 /// Creates a predefined CLAHE object
 /// </summary>
 /// <param name="clipLimit"></param>
 /// <param name="tileGridSize"></param>
 /// <returns></returns>
 public static CLAHE Create(double clipLimit = 40.0, Size? tileGridSize = null)
 {
     IntPtr ptr = NativeMethods.imgproc_createCLAHE(
         clipLimit, tileGridSize.GetValueOrDefault(new Size(8, 8)));
     return FromPtr(ptr);
 }
Developer: 0sv, Project: opencvsharp, Lines: 12, Source: CLAHE.cs
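
A usage sketch for the created object. The constant Mat is only a stand-in for a real low-contrast CV_8UC1 image, and Apply is the CLAHE method that performs the equalization:

 using (var src = new Mat(256, 256, MatType.CV_8UC1, Scalar.All(100)))  // stand-in for a real grayscale image
 using (var dst = new Mat())
 using (var clahe = CLAHE.Create(clipLimit: 2.0, tileGridSize: new Size(8, 8)))
 {
     clahe.Apply(src, dst);  // dst holds the contrast-limited, adaptively equalized result
 }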

Example 5: HOGDescriptor

#if LANG_JP
        /// <summary>
        /// HOG ディスクリプタおよび検出器を作成します
        /// </summary>
        /// <param name="winSize">検出窓サイズ.ブロックのサイズと移動量に合わせる必要があります.</param>
        /// <param name="blockSize">ピクセル単位で表されるブロックサイズ.セルサイズに合わせる必要があります.</param>
        /// <param name="blockStride">ブロックの移動量.セルサイズの倍数でなければいけません.</param>
        /// <param name="cellSize">セルサイズ.</param>
        /// <param name="nbins">ビンの個数.</param>
        /// <param name="derivAperture"></param>
        /// <param name="winSigma">ガウシアン平滑化窓パラメータ.</param>
        /// <param name="histogramNormType"></param>
        /// <param name="l2HysThreshold">L2-Hys 正規化縮小処理の閾値.</param>
        /// <param name="gammaCorrection">前処理としてガンマ補正を行うか否か,を指定します.</param>
        /// <param name="nlevels">検出窓拡大回数の最大値</param>
#else
        /// <summary>
        /// Creates the HOG descriptor and detector.
        /// </summary>
        /// <param name="winSize">Detection window size. Align to block size and block stride.</param>
        /// <param name="blockSize">Block size in pixels. Align to cell size. Only (16,16) is supported for now.</param>
        /// <param name="blockStride">Block stride. It must be a multiple of cell size.</param>
        /// <param name="cellSize">Cell size. Only (8, 8) is supported for now.</param>
        /// <param name="nbins">Number of bins. Only 9 bins per cell are supported for now.</param>
        /// <param name="derivAperture"></param>
        /// <param name="winSigma">Gaussian smoothing window parameter.</param>
        /// <param name="histogramNormType"></param>
        /// <param name="l2HysThreshold">L2-Hys normalization method shrinkage.</param>
        /// <param name="gammaCorrection">Flag to specify whether the gamma correction preprocessing is required or not.</param>
        /// <param name="nlevels">Maximum number of detection window increases.</param>
#endif
        public HOGDescriptor(
            Size? winSize = null,
            Size? blockSize = null,
            Size? blockStride = null,
            Size? cellSize = null,
            int nbins = 9, 
            int derivAperture = 1, 
            double winSigma = -1, 
            HistogramNormType histogramNormType = HistogramNormType.L2Hys, 
            double l2HysThreshold = 0.2, 
            bool gammaCorrection = true,
            int nlevels = DefaultNlevels)
        {
            ptr = NativeMethods.objdetect_HOGDescriptor_new2(
                winSize.GetValueOrDefault(new Size(64, 128)),
                blockSize.GetValueOrDefault(new Size(16, 16)),
                blockStride.GetValueOrDefault(new Size(8, 8)), 
                cellSize.GetValueOrDefault(new Size(8, 8)), 
                nbins,
                derivAperture, 
                winSigma, histogramNormType, 
                l2HysThreshold, 
                gammaCorrection ? 1 : 0, 
                nlevels);
        }
Developer: kaorun55, Project: opencvsharp, Lines: 55, Source: HOGDescriptor.cs
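
For reference, a sketch of constructing a descriptor with a non-default window while keeping the alignment rules from the remarks above (block size aligned to cell size, block stride a multiple of cell size):

        // Hypothetical 48x96 detection window; the remaining geometry follows the alignment rules above.
        var hog = new HOGDescriptor(
            winSize: new Size(48, 96),
            blockSize: new Size(16, 16),
            blockStride: new Size(8, 8),
            cellSize: new Size(8, 8),
            nbins: 9);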

Example 6: DetectMultiScale

        /// <summary>
        /// 
        /// </summary>
        /// <param name="image"></param>
        /// <param name="objectsBuf"></param>
        /// <param name="maxObjectSize"></param>
        /// <param name="minSize"></param>
        /// <param name="scaleFactor"></param>
        /// <param name="minNeighbors"></param>
        /// <returns>number of detected objects</returns>
        public int DetectMultiScale(
            GpuMat image, GpuMat objectsBuf, 
            Size maxObjectSize, Size? minSize = null, double scaleFactor = 1.1, int minNeighbors = 4)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException("image");
            if (objectsBuf == null)
                throw new ArgumentNullException("objectsBuf");

            CvSize minSizeVal = minSize.GetValueOrDefault(new Size());

            int ret = NativeMethods.gpu_CascadeClassifier_GPU_detectMultiScale2(
                ptr, image.CvPtr, objectsBuf.CvPtr, maxObjectSize, minSizeVal, 
                scaleFactor, minNeighbors);
            
            GC.KeepAlive(image);
            GC.KeepAlive(objectsBuf);
            return ret;
        }
Developer: 0sv, Project: opencvsharp, Lines: 31, Source: CascadeClassifier_GPU.cs

Example 7: StereoRectify

 /// <summary>
 /// computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
 /// </summary>
 /// <param name="cameraMatrix1">First camera matrix.</param>
 /// <param name="distCoeffs1">First camera distortion parameters.</param>
 /// <param name="cameraMatrix2">Second camera matrix.</param>
 /// <param name="distCoeffs2">Second camera distortion parameters.</param>
 /// <param name="imageSize">Size of the image used for stereo calibration.</param>
 /// <param name="R">Rotation matrix between the coordinate systems of the first and the second cameras.</param>
 /// <param name="T">Translation vector between coordinate systems of the cameras.</param>
 /// <param name="R1">Output 3x3 rectification transform (rotation matrix) for the first camera.</param>
 /// <param name="R2"> Output 3x3 rectification transform (rotation matrix) for the second camera.</param>
 /// <param name="P1">Output 3x4 projection matrix in the new (rectified) coordinate systems for the first camera.</param>
 /// <param name="P2">Output 3x4 projection matrix in the new (rectified) coordinate systems for the second camera.</param>
 /// <param name="Q">Output 4x4 disparity-to-depth mapping matrix (see reprojectImageTo3D() ).</param>
 /// <param name="flags">Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY. 
 /// If the flag is set, the function makes the principal points of each camera have the same pixel coordinates in the rectified views. 
 /// And if the flag is not set, the function may still shift the images in the horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the useful image area.</param>
 /// <param name="alpha">Free scaling parameter. 
 /// If it is -1 or absent, the function performs the default scaling. Otherwise, the parameter should be between 0 and 1. 
 /// alpha=0 means that the rectified images are zoomed and shifted so that only valid pixels are visible (no black areas after rectification). 
 /// alpha=1 means that the rectified image is decimated and shifted so that all the pixels from the original images from the cameras are retained 
 /// in the rectified images (no source image pixels are lost). Obviously, any intermediate value yields an intermediate result between those two extreme cases.</param>
 /// <param name="newImageSize">New image resolution after rectification. The same size should be passed to initUndistortRectifyMap(). When (0,0) is passed (default), it is set to the original imageSize . 
 /// Setting it to larger value can help you preserve details in the original image, especially when there is a big radial distortion.</param>
 public static void StereoRectify(double[,] cameraMatrix1, double[] distCoeffs1,
                                  double[,] cameraMatrix2, double[] distCoeffs2,
                                  Size imageSize, double[,] R, double[] T,
                                  out double[,] R1, out double[,] R2,
                                  out double[,] P1, out double[,] P2,
                                  out double[,] Q,
                                  StereoRectificationFlag flags = StereoRectificationFlag.ZeroDisparity,
                                  double alpha = -1, Size? newImageSize = null)
 {
     Size newImageSize0 = newImageSize.GetValueOrDefault(new Size(0, 0));
     Rect validPixROI1, validPixROI2;
     StereoRectify(
         cameraMatrix1, distCoeffs1,
         cameraMatrix2, distCoeffs2,
         imageSize, R, T,
         out R1, out R2, out P1, out P2, out Q,
         flags, alpha, newImageSize0, out validPixROI1, out validPixROI2);
 }
Developer: kaorun55, Project: opencvsharp, Lines: 43, Source: Cv2_calib3d.cs
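
A sketch of calling this overload with placeholder calibration data. In practice the camera matrices, distortion coefficients, R and T come from a calibration step such as Cv2.StereoCalibrate; the numbers below are illustrative only:

 double[,] K1 = { { 700, 0, 320 }, { 0, 700, 240 }, { 0, 0, 1 } };   // placeholder intrinsics, camera 1
 double[,] K2 = { { 700, 0, 320 }, { 0, 700, 240 }, { 0, 0, 1 } };   // placeholder intrinsics, camera 2
 double[] d1 = new double[5], d2 = new double[5];                    // zero distortion for the sketch
 double[,] R = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 } };            // placeholder rotation between cameras
 double[] T = { -0.06, 0, 0 };                                       // roughly a 6 cm baseline along x
 double[,] R1, R2, P1, P2, Q;
 Cv2.StereoRectify(K1, d1, K2, d2, new Size(640, 480), R, T,
     out R1, out R2, out P1, out P2, out Q);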

Example 8: PyrUp

 /// <summary>
 /// Upsamples an image and then blurs it.
 /// </summary>
 /// <param name="src">input image.</param>
 /// <param name="dst">output image. It has the specified size and the same type as src.</param>
 /// <param name="dstSize">size of the output image; by default, it is computed as Size(src.cols*2, (src.rows*2)</param>
 /// <param name="borderType"></param>
 public static void PyrUp(InputArray src, OutputArray dst,
     Size? dstSize = null, BorderType borderType = BorderType.Default)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src.ThrowIfDisposed();
     dst.ThrowIfNotReady();
     Size dstSize0 = dstSize.GetValueOrDefault(new Size());
     NativeMethods.imgproc_pyrUp(src.CvPtr, dst.CvPtr, dstSize0, (int)borderType);
     dst.Fix();
 }
Developer: josephgodwinkimani, Project: opencvsharp, Lines: 20, Source: Cv2_imgproc.cs
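
A sketch of the default upsampling behaviour; the solid-colour Mat stands in for a real image:

 using (var src = new Mat(240, 320, MatType.CV_8UC3, Scalar.All(128)))  // stand-in for a real 320x240 image
 using (var dst = new Mat())
 {
     Cv2.PyrUp(src, dst);            // default dstSize: (src.cols*2, src.rows*2), i.e. 640x480 here
     Console.WriteLine(dst.Size());  // prints the upsampled size
 }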

Example 9: GetDefaultNewCameraMatrix

 /// <summary>
 /// returns the default new camera matrix (by default it is the same as cameraMatrix unless centerPrincipalPoint=true)
 /// </summary>
 /// <param name="cameraMatrix">Input camera matrix.</param>
 /// <param name="imgSize">Camera view image size in pixels.</param>
 /// <param name="centerPrincipalPoint">Location of the principal point in the new camera matrix. 
 /// The parameter indicates whether this location should be at the image center or not.</param>
 /// <returns>the camera matrix that is either an exact copy of the input cameraMatrix 
 /// (when centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).</returns>
 public static Mat GetDefaultNewCameraMatrix(InputArray cameraMatrix,
     Size? imgSize = null, bool centerPrincipalPoint = false)
 {
     if (cameraMatrix == null)
         throw new ArgumentNullException("cameraMatrix");
     cameraMatrix.ThrowIfDisposed();
     Size imgSize0 = imgSize.GetValueOrDefault(new Size());
     IntPtr matPtr = NativeMethods.imgproc_getDefaultNewCameraMatrix(cameraMatrix.CvPtr, imgSize0, centerPrincipalPoint ? 1 : 0);
     return new Mat(matPtr);
 }
Developer: josephgodwinkimani, Project: opencvsharp, Lines: 19, Source: Cv2_imgproc.cs
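
A sketch of calling it with a hand-built 3x3 camera matrix. The Mat constructor that copies from a double[] is assumed to be available in this OpenCvSharp version, and the intrinsics are placeholders:

 double[] k = { 700, 0, 320, 0, 700, 240, 0, 0, 1 };  // placeholder intrinsics, row-major 3x3
 using (var cameraMatrix = new Mat(3, 3, MatType.CV_64FC1, k))
 using (var newK = Cv2.GetDefaultNewCameraMatrix(cameraMatrix, new Size(640, 480), centerPrincipalPoint: true))
 {
     // With centerPrincipalPoint=true the principal point is moved to (320, 240), the image centre.
 }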

Example 10: CalcOpticalFlowPyrLK

        /// <summary>
        /// computes sparse optical flow using multi-scale Lucas-Kanade algorithm
        /// </summary>
        /// <param name="prevImg"></param>
        /// <param name="nextImg"></param>
        /// <param name="prevPts"></param>
        /// <param name="nextPts"></param>
        /// <param name="status"></param>
        /// <param name="err"></param>
        /// <param name="winSize"></param>
        /// <param name="maxLevel"></param>
        /// <param name="criteria"></param>
        /// <param name="flags"></param>
        /// <param name="minEigThreshold"></param>
        public static void CalcOpticalFlowPyrLK(
            InputArray prevImg, InputArray nextImg,
            InputArray prevPts, InputOutputArray nextPts,
            OutputArray status, OutputArray err,
            Size? winSize = null,
            int maxLevel = 3,
            TermCriteria? criteria = null,
            OpticalFlowFlags flags = OpticalFlowFlags.None,
            double minEigThreshold = 1e-4)
        {
            if (prevImg == null)
                throw new ArgumentNullException("prevImg");
            if (nextImg == null)
                throw new ArgumentNullException("nextImg");
            if (prevPts == null)
                throw new ArgumentNullException("prevPts");
            if (nextPts == null)
                throw new ArgumentNullException("nextPts");
            if (status == null)
                throw new ArgumentNullException("status");
            if (err == null)
                throw new ArgumentNullException("err");
            prevImg.ThrowIfDisposed();
            nextImg.ThrowIfDisposed();
            prevPts.ThrowIfDisposed();
            nextPts.ThrowIfNotReady();
            status.ThrowIfNotReady();
            err.ThrowIfNotReady();

            Size winSize0 = winSize.GetValueOrDefault(new Size(21, 21));
            TermCriteria criteria0 = criteria.GetValueOrDefault(
                TermCriteria.Both(30, 0.01));

            NativeMethods.video_calcOpticalFlowPyrLK_InputArray(
                prevImg.CvPtr, nextImg.CvPtr, prevPts.CvPtr, nextPts.CvPtr,
                status.CvPtr, err.CvPtr, winSize0,maxLevel,
                criteria0, (int)flags, minEigThreshold);

            nextPts.Fix();
            status.Fix();
            err.Fix();
        }
Developer: healtech, Project: opencvsharp, Lines: 56, Source: Cv2_video.cs
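
A sketch of the InputArray overload shown above. Real code would pass two consecutive video frames and seed prevPts from a corner detector such as Cv2.GoodFeaturesToTrack; here the frames and the single tracked point are synthetic placeholders:

        using (var prev = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(0)))         // stand-in for frame t
        using (var next = new Mat(240, 320, MatType.CV_8UC1, Scalar.All(0)))         // stand-in for frame t+1
        using (var prevPts = new Mat(1, 1, MatType.CV_32FC2, new Scalar(160, 120)))  // one seed point
        using (var nextPts = new Mat())
        using (var status = new Mat())
        using (var err = new Mat())
        {
            Cv2.CalcOpticalFlowPyrLK(prev, next, prevPts, nextPts, status, err,
                winSize: new Size(21, 21), maxLevel: 3);
            // nextPts now holds the tracked positions; status marks which points were actually found.
        }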

Example 11: HOGDescriptor

#if LANG_JP
        /// <summary>
        /// HOG ディスクリプタおよび検出器を作成します
        /// </summary>
        /// <param name="winSize">検出窓サイズ.ブロックのサイズと移動量に合わせる必要があります.</param>
        /// <param name="blockSize">ピクセル単位で表されるブロックサイズ.セルサイズに合わせる必要があります.</param>
        /// <param name="blockStride">ブロックの移動量.セルサイズの倍数でなければいけません.</param>
        /// <param name="cellSize">セルサイズ.</param>
        /// <param name="nbins">ビンの個数.</param>
        /// <param name="winSigma">ガウシアン平滑化窓パラメータ.</param>
        /// <param name="thresholdL2Hys">L2-Hys 正規化縮小処理の閾値.</param>
        /// <param name="gammaCorrection">前処理としてガンマ補正を行うか否か,を指定します.</param>
        /// <param name="nlevels">検出窓拡大回数の最大値</param>
#else
        /// <summary>
        /// Creates the HOG descriptor and detector.
        /// </summary>
        /// <param name="winSize">Detection window size. Align to block size and block stride.</param>
        /// <param name="blockSize">Block size in pixels. Align to cell size.</param>
        /// <param name="blockStride">Block stride. It must be a multiple of cell size.</param>
        /// <param name="cellSize">Cell size.</param>
        /// <param name="nbins">Number of bins.</param>
        /// <param name="winSigma">Gaussian smoothing window parameter.</param>
        /// <param name="thresholdL2Hys">L2-Hys normalization method shrinkage threshold.</param>
        /// <param name="gammaCorrection">Flag to specify whether the gamma correction preprocessing is required or not.</param>
        /// <param name="nlevels">Maximum number of detection window increases.</param>
#endif
        public HOGDescriptor(
            Size? winSize = null,
            Size? blockSize = null,
            Size? blockStride = null,
            Size? cellSize = null,
            int nbins = 9,
            double winSigma = DefaultWinSigma, 
            double thresholdL2Hys = 0.2, 
            bool gammaCorrection = true, 
            int nlevels = DefaultNlevels)
        {
            Cv2Gpu.ThrowIfGpuNotAvailable();
            Size winSize0 = winSize.GetValueOrDefault(new Size(64, 128));
            Size blockSize0 = blockSize.GetValueOrDefault(new Size(16, 16));
            Size blockStride0 = blockStride.GetValueOrDefault(new Size(8, 8));
            Size cellSize0 = cellSize.GetValueOrDefault(new Size(8, 8));
            ptr = NativeMethods.HOGDescriptor_new(
                winSize0, blockSize0, blockStride0, cellSize0,
                nbins, winSigma, thresholdL2Hys, gammaCorrection, nlevels);
        }
Developer: 0sv, Project: opencvsharp, Lines: 46, Source: HOGDescriptor.cs

Example 12: DetectMultiScale

        /// <summary>
        /// Performs object detection with a multi-scale window.
        /// </summary>
        /// <param name="img">Source image.</param>
        /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Mock parameter to keep the CPU interface compatibility. It must be (0,0).</param>
        /// <param name="scale">Coefficient of the detection window increase.</param>
        /// <param name="groupThreshold">Coefficient to regulate the similarity threshold. 
        /// When detected, some objects can be covered by many rectangles. 0 means not to perform grouping.</param>
        /// <returns>Detected objects boundaries.</returns>
        public virtual Rect[] DetectMultiScale(Mat img, double hitThreshold = 0, 
            Size? winStride = null, Size? padding = null, double scale = 1.05, int groupThreshold = 2)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfRect())
            {
                NativeMethods.HOGDescriptor_detectMultiScale(ptr, img.CvPtr, flVec.CvPtr, hitThreshold, winStride0, padding0, scale, groupThreshold);
                // Copy the results from the native std::vector<cv::Rect>* into a Rect[] and return it
                return flVec.ToArray();
            }          
        }
Developer: 0sv, Project: opencvsharp, Lines: 27, Source: HOGDescriptor.cs

Example 13: DetectMultiScale

        /// <summary>
        /// Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
        /// </summary>
        /// <param name="image">Matrix of the type CV_8U containing an image where objects are detected.</param>
        /// <param name="rejectLevels"></param>
        /// <param name="levelWeights"></param>
        /// <param name="scaleFactor">Parameter specifying how much the image size is reduced at each image scale.</param>
        /// <param name="minNeighbors">Parameter specifying how many neighbors each candidate rectangle should have to retain it.</param>
        /// <param name="flags">Parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects. 
        /// It is not used for a new cascade.</param>
        /// <param name="minSize">Minimum possible object size. Objects smaller than that are ignored.</param>
        /// <param name="maxSize">Maximum possible object size. Objects larger than that are ignored.</param>
        /// <param name="outputRejectLevels"></param>
        /// <returns>Vector of rectangles where each rectangle contains the detected object.</returns>
        public virtual Rect[] DetectMultiScale(
            Mat image,
            out int[] rejectLevels,
            out double[] levelWeights,
            double scaleFactor = 1.1,
            int minNeighbors = 3,
            HaarDetectionType flags = HaarDetectionType.Zero,
            Size? minSize = null,
            Size? maxSize = null,
            bool outputRejectLevels = false)
        {
            if (disposed)
                throw new ObjectDisposedException("CascadeClassifier");
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            Size minSize0 = minSize.GetValueOrDefault(new Size());
            Size maxSize0 = maxSize.GetValueOrDefault(new Size());

            using (var objectsVec = new VectorOfRect())
            using (var rejectLevelsVec = new VectorOfInt32())
            using (var levelWeightsVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_CascadeClassifier_detectMultiScale(
                    ptr, image.CvPtr, objectsVec.CvPtr, rejectLevelsVec.CvPtr, levelWeightsVec.CvPtr,
                    scaleFactor, minNeighbors, (int)flags, minSize0, maxSize0, outputRejectLevels ? 1 : 0);

                rejectLevels = rejectLevelsVec.ToArray();
                levelWeights = levelWeightsVec.ToArray();
                return objectsVec.ToArray();
            }
        }
Developer: jorik041, Project: opencvsharp, Lines: 47, Source: CascadeClassifier.cs
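
A usage sketch for this overload. The cascade XML path and image path are hypothetical; Cv2.ImRead and Cv2.Rectangle are standard OpenCvSharp calls:

        using (var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml"))  // hypothetical path
        using (var img = Cv2.ImRead("faces.jpg"))                                            // hypothetical path
        {
            int[] rejectLevels;
            double[] levelWeights;
            Rect[] faces = cascade.DetectMultiScale(img, out rejectLevels, out levelWeights,
                scaleFactor: 1.1, minNeighbors: 3, minSize: new Size(30, 30), outputRejectLevels: true);
            foreach (Rect face in faces)
                Cv2.Rectangle(img, face, Scalar.Red, 2);  // draw each detection
        }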

Example 14: ComputeGradient

        /// <summary>
        /// 
        /// </summary>
        /// <param name="img"></param>
        /// <param name="grad"></param>
        /// <param name="angleOfs"></param>
        /// <param name="paddingTL"></param>
        /// <param name="paddingBR"></param>
        public virtual void ComputeGradient(Mat img, Mat grad, Mat angleOfs, Size? paddingTL = null, Size? paddingBR = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            if (grad == null)
                throw new ArgumentNullException("grad");
            if (angleOfs == null)
                throw new ArgumentNullException("angleOfs");
            img.ThrowIfDisposed();
            grad.ThrowIfDisposed();
            angleOfs.ThrowIfDisposed();

            Size paddingTL0 = paddingTL.GetValueOrDefault(new Size());
            Size paddingBR0 = paddingBR.GetValueOrDefault(new Size());
            NativeMethods.objdetect_HOGDescriptor_computeGradient(ptr, img.CvPtr, grad.CvPtr, angleOfs.CvPtr, paddingTL0, paddingBR0);
        }
Developer: kaorun55, Project: opencvsharp, Lines: 26, Source: HOGDescriptor.cs

Example 15: DetectROI

        /// <summary>
        /// evaluate specified ROI and return confidence value for each location
        /// </summary>
        /// <param name="img"></param>
        /// <param name="locations"></param>
        /// <param name="foundLocations"></param>
        /// <param name="confidences"></param>
        /// <param name="hitThreshold"></param>
        /// <param name="winStride"></param>
        /// <param name="padding"></param>
        public void DetectROI(
            Mat img, Point[] locations, out Point[] foundLocations, out double[] confidences,
            double hitThreshold = 0, Size? winStride = null, Size? padding = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            if (locations == null)
                throw new ArgumentNullException("locations");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfPoint())
            using (var cVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectROI(ptr, img.CvPtr, locations, locations.Length,
                    flVec.CvPtr, cVec.CvPtr, hitThreshold, winStride0, padding0);
                foundLocations = flVec.ToArray();
                confidences = cVec.ToArray();
            }
        }
Developer: kaorun55, Project: opencvsharp, Lines: 33, Source: HOGDescriptor.cs


Note: The Size.GetValueOrDefault examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.