

C# IOutputArray Class Code Examples

This article collects typical usage examples of the IOutputArray class in C#. If you are wondering what the C# IOutputArray class is for, how to use it, or what real-world usage looks like, the curated code examples below may help.


The IOutputArray interface belongs to the Emgu.CV namespace (all examples below come from the Emgu CV project). The following shows 15 code examples of the IOutputArray class, sorted by popularity by default.

Example 1: WarpAffine

 /// <summary>
 /// Applies an affine transformation to an image.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="mapMatrix">2x3 transformation matrix</param>
 /// <param name="dsize">Size of the output image.</param>
 /// <param name="interpMethod">Interpolation method</param>
 /// <param name="warpMethod">Warp method</param>
 /// <param name="borderMode">Pixel extrapolation method</param>
 /// <param name="borderValue">A value used to fill outliers</param>
 public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaMapMatrix = mapMatrix.GetInputArray())
       cveWarpAffine(iaSrc, oaDst, iaMapMatrix, ref dsize, (int)interpMethod | (int)warpMethod, borderMode, ref borderValue);
 }
Developer: Warren-GH, Project: emgucv, Lines of code: 18, Source file: CvInvokeImgproc.cs
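
A minimal usage sketch for the wrapper above, assuming Emgu CV 3.x: it builds a 2x3 rotation matrix with CvInvoke.GetRotationMatrix2D and passes it to WarpAffine, leaving interpolation and border handling at their defaults. The 30-degree angle and the helper name RotateImage are illustrative only.

 // Requires: using Emgu.CV; using System.Drawing;
 public static Mat RotateImage(Mat src)
 {
    Mat dst = new Mat();
    using (Mat rotation = new Mat())
    {
       // 2x3 affine matrix for a 30 degree rotation about the image center, scale 1.0
       CvInvoke.GetRotationMatrix2D(new PointF(src.Width / 2f, src.Height / 2f), 30, 1.0, rotation);
       CvInvoke.WarpAffine(src, dst, rotation, src.Size);
    }
    return dst;
 }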

Example 2: Compute

 /// <summary>
 /// Computes disparity map for the specified stereo pair
 /// </summary>
 /// <param name="matcher">The stereo matcher</param>
 /// <param name="left">Left 8-bit single-channel image.</param>
 /// <param name="right">Right image of the same size and the same type as the left one.</param>
 /// <param name="disparity">Output disparity map. It has the same size as the input images. Some algorithms, like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map</param>
 public static void Compute(this IStereoMatcher matcher, IInputArray left, IInputArray right, IOutputArray disparity)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CvStereoMatcherCompute(matcher.StereoMatcherPtr, iaLeft, iaRight, oaDisparity);
 }
Developer: neutmute, Project: emgucv, Lines of code: 14, Source file: StereoMatcherExtensions.cs
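
A short usage sketch for the extension method above, assuming the StereoBM class from Emgu.CV implements IStereoMatcher; the constructor arguments (64 disparities, 21-pixel block size) are illustrative and may differ between versions. Both inputs must be rectified 8-bit single-channel images of the same size.

 // Requires: using Emgu.CV;
 public static Mat ComputeDisparity(Mat leftGray, Mat rightGray)
 {
    Mat disparity = new Mat();
    using (StereoBM stereo = new StereoBM(64, 21))   // numberOfDisparities, blockSize (assumed)
    {
       // StereoBM implements IStereoMatcher, so the Compute extension method applies
       stereo.Compute(leftGray, rightGray, disparity);
    }
    return disparity;
 }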

Example 3: Detect

 /// <summary>
 /// Find the good features to track
 /// </summary>
 /// <param name="image">The image from which the corners are detected</param>
 /// <param name="corners">The detected corners</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Detect(IInputArray image, IOutputArray corners, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaCorners = corners.GetOutputArray())
    using (InputArray iaMask = (mask == null ? InputArray.GetEmpty() : mask.GetInputArray()))
       CudaInvoke.cudaCornersDetectorDetect(_ptr, iaImage, oaCorners, iaMask, stream);
 }
Developer: neutmute, Project: emgucv, Lines of code: 10, Source file: CudaGoodFeaturesToTrackDetector.cs

Example 4: FindStereoCorrespondence

 /// <summary>
 /// Computes disparity map for the input rectified stereo pair.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image</param>
 /// <param name="right">The right image of the same size and the same type</param>
 /// <param name="disparity">The disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void FindStereoCorrespondence(IInputArray left, IInputArray right, IOutputArray disparity, Stream stream = null)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CudaInvoke.cudaStereoBMFindStereoCorrespondence(_ptr, iaLeft, iaRight, oaDisparity, stream);
 }
Developer: reidblomquist, Project: emgucv, Lines of code: 14, Source file: CudaStereoBM.cs

Example 5: DetectMultiScale

 /// <summary>
 /// Detects objects of different sizes in the input image.
 /// </summary>
 /// <param name="image">Matrix of type CV_8U containing an image where objects should be detected.</param>
 /// <param name="objects">Buffer to store detected objects (rectangles).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void DetectMultiScale(IInputArray image, IOutputArray objects, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaObjects = objects.GetOutputArray())
       CudaInvoke.cudaCascadeClassifierDetectMultiScale(_ptr, iaImage, oaObjects,
          stream == null ? IntPtr.Zero : stream.Ptr);
 }
Developer: neutmute, Project: emgucv, Lines of code: 13, Source file: CudaCascadeClassifier.cs

Example 6: Index

      /*
      /// <summary>
      /// Create an auto-tuned flann index
      /// </summary>
      /// <param name="values">A row by row matrix of descriptors</param>
      /// <param name="targetPrecision">Precision desired, use 0.9 if not sure</param>
      /// <param name="buildWeight">build tree time weighting factor, use 0.01 if not sure</param>
      /// <param name="memoryWeight">index memory weighting factor, use 0 if not sure</param>
      /// <param name="sampleFraction">what fraction of the dataset to use for autotuning, use 0.1 if not sure</param>
      public Index(IInputArray values, float targetPrecision, float buildWeight, float memoryWeight, float sampleFraction)
      {
         using (InputArray iaValues = values.GetInputArray())
            _ptr = CvFlannIndexCreateAutotuned(iaValues, targetPrecision, buildWeight, memoryWeight, sampleFraction);
      }*/
      #endregion

      /// <summary>
      /// Perform k-nearest-neighbours (KNN) search
      /// </summary>
      /// <param name="queries">A row by row matrix of descriptors to be query for nearest neighbours</param>
      /// <param name="indices">The result of the indices of the k-nearest neighbours</param>
      /// <param name="squareDistances">The square of the Eculidean distance between the neighbours</param>
      /// <param name="knn">Number of nearest neighbors to search for</param>
      /// <param name="checks">The number of times the tree(s) in the index should be recursively traversed. A
      /// higher value for this parameter would give better search precision, but also take more
      /// time. If automatic configuration was used when the index was created, the number of
      /// checks required to achieve the specified precision was also computed, in which case
      /// this parameter is ignored </param>
      public void KnnSearch(IInputArray queries, IOutputArray indices, IOutputArray squareDistances, int knn, int checks)
      {
         using (InputArray iaQueries = queries.GetInputArray())
         using (OutputArray oaIndices = indices.GetOutputArray())
         using (OutputArray oaSquareDistances = squareDistances.GetOutputArray())
             CvFlannIndexKnnSearch(_ptr, iaQueries, oaIndices, oaSquareDistances, knn, checks);
      }
Developer: neutmute, Project: emgucv, Lines of code: 35, Source file: Index.cs

Example 7: DetectAndCompute

 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaDescriptors = descriptors.GetOutputArray())
       Feature2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
 }
Developer: neutmute, Project: emgucv, Lines of code: 15, Source file: Feature2D.cs
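
A usage sketch for DetectAndCompute, assuming the ORBDetector implementation of Feature2D from Emgu.CV.Features2D; any other Feature2D subclass would be used the same way.

 // Requires: using Emgu.CV; using Emgu.CV.Features2D; using Emgu.CV.Util;
 public static Mat ExtractOrbFeatures(Mat image, VectorOfKeyPoint keyPoints)
 {
    Mat descriptors = new Mat();
    using (ORBDetector orb = new ORBDetector())
    {
       // null mask = search the whole image; false = run the detection phase as well
       orb.DetectAndCompute(image, null, keyPoints, descriptors, false);
    }
    return descriptors;
 }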

Example 8: Apply

 /// <summary>
 /// Apply the filter to the disparity image
 /// </summary>
 /// <param name="disparity">The input disparity map</param>
 /// <param name="image">The image</param>
 /// <param name="dst">The output disparity map, should have the same size as the input disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray disparity, IInputArray image, IOutputArray dst, Stream stream = null)
 {
    using (InputArray iaDisparity = disparity.GetInputArray())
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       CudaInvoke.cudaDisparityBilateralFilterApply(this, iaDisparity, iaImage, oaDst, stream);
 }
Developer: neutmute, Project: emgucv, Lines of code: 14, Source file: CudaDisparityBilateralFilter.cs

Example 9: FindHomography

 /// <summary>
 /// Finds perspective transformation H=||h_ij|| between the source and the destination planes
 /// </summary>
 /// <param name="srcPoints">Point coordinates in the original plane</param>
 /// <param name="dstPoints">Point coordinates in the destination plane</param>
 /// <param name="homography">The output homography matrix</param>
 /// <param name="method">FindHomography method</param>
 /// <param name="ransacReprojThreshold">
 /// The maximum allowed reprojection error to treat a point pair as an inlier. 
 /// The parameter is only used in RANSAC-based homography estimation. 
 /// E.g. if dst_points coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range ~1..3
 /// </param>
 /// <param name="mask">Optional output mask set by a robust method ( CV_RANSAC or CV_LMEDS ). Note that the input mask values are ignored.</param>
 public static void FindHomography(
    PointF[] srcPoints,
    PointF[] dstPoints,
    IOutputArray homography,
    CvEnum.HomographyMethod method,
    double ransacReprojThreshold = 3,
    IOutputArray mask = null)
 {
    GCHandle srcHandle = GCHandle.Alloc(srcPoints, GCHandleType.Pinned);
    GCHandle dstHandle = GCHandle.Alloc(dstPoints, GCHandleType.Pinned);
    try
    {
       using (
          Mat srcPointMatrix = new Mat(srcPoints.Length, 2, DepthType.Cv32F, 1, srcHandle.AddrOfPinnedObject(), 8))
       using (
          Mat dstPointMatrix = new Mat(dstPoints.Length, 2, DepthType.Cv32F, 1, dstHandle.AddrOfPinnedObject(), 8))
       {
          CvInvoke.FindHomography(srcPointMatrix, dstPointMatrix, homography, method, ransacReprojThreshold, mask);
       }
    }
    finally
    {
       srcHandle.Free();
       dstHandle.Free();
    }
 }
Developer: neutmute, Project: emgucv, Lines of code: 40, Source file: CvInvokeCalib3d.cs
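
A self-contained usage sketch for the overload above: four point correspondences mapping a unit square onto a translated copy, estimated with the default (least-squares) method. For noisy point sets with outliers, HomographyMethod.Ransac together with a reprojection threshold would be the usual choice.

 // Requires: using Emgu.CV; using Emgu.CV.CvEnum; using System.Drawing;
 public static Mat EstimateHomography()
 {
    PointF[] srcPoints = { new PointF(0, 0), new PointF(1, 0), new PointF(1, 1), new PointF(0, 1) };
    PointF[] dstPoints = { new PointF(2, 3), new PointF(3, 3), new PointF(3, 4), new PointF(2, 4) };
    Mat homography = new Mat();
    // No inlier mask requested; the result here is a pure translation homography
    CvInvoke.FindHomography(srcPoints, dstPoints, homography, HomographyMethod.Default);
    return homography;
 }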

Example 10: Inpaint

 /// <summary>
 /// Reconstructs the selected image area from the pixel near the area boundary. The function may be used to remove dust and scratches from a scanned photo, or to remove undesirable objects from still images or video.
 /// </summary>
 /// <param name="src">The input 8-bit 1-channel or 3-channel image</param>
 /// <param name="mask">The inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted</param>
 /// <param name="dst">The output image of the same format and the same size as input</param>
 /// <param name="flags">The inpainting method</param>
 /// <param name="inpaintRadius">The radius of circular neighborhood of each point inpainted that is considered by the algorithm</param>
 public static void Inpaint(IInputArray src, IInputArray mask, IOutputArray dst, double inpaintRadius, CvEnum.InpaintType flags)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (InputArray iaMask = mask.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveInpaint(iaSrc, iaMask, oaDst, inpaintRadius, flags);
 }
Developer: reidblomquist, Project: emgucv, Lines of code: 15, Source file: CvInvokePhoto.cs
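
A usage sketch for Inpaint: reconstruct the regions marked by the non-zero pixels of a mask. The 3-pixel radius and the choice of InpaintType.Telea are illustrative (the NS method is the other available option).

 // Requires: using Emgu.CV; using Emgu.CV.CvEnum;
 public static Mat RemoveScratches(Mat image, Mat scratchMask)
 {
    Mat restored = new Mat();
    // Non-zero pixels in scratchMask mark the area to reconstruct; 3 is the inpainting radius in pixels
    CvInvoke.Inpaint(image, scratchMask, restored, 3, InpaintType.Telea);
    return restored;
 }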

Example 11: DetectAsync

 /// <summary>
 /// Detect the features in the image
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="keypoints">The result vector of keypoints</param>
 /// <param name="image">The image from which the features will be detected from</param>
 /// <param name="mask">The optional mask.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void DetectAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaKeypoints = keypoints.GetOutputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
       CudaInvoke.cveCudaFeature2dAsyncDetectAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, iaMask, stream);
 }
Developer: neutmute, Project: emgucv, Lines of code: 15, Source file: Feature2DAsync.cs

Example 12: Match

 /// <summary>
 /// This function is similar to cvCalcBackProjectPatch. It slides through image, compares overlapped patches of size wxh with templ using the specified method, and stores the comparison results in result
 /// </summary>
 /// <param name="image">Image where the search is running. It should be 8-bit or 32-bit floating-point</param>
 /// <param name="templ">Searched template; must be not greater than the source image and the same data type as the image</param>
 /// <param name="result">A map of comparison results; single-channel 32-bit floating-point. If image is WxH and templ is wxh then result must be W-w+1xH-h+1.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>  
 public void Match(IInputArray image, IInputArray templ, IOutputArray result, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaTempl = templ.GetInputArray())
    using (OutputArray oaResult = result.GetOutputArray())
       CudaInvoke.cudaTemplateMatchingMatch(_ptr, iaImage, iaTempl, oaResult, stream);
 }
Developer: neutmute, Project: emgucv, Lines of code: 14, Source file: CudaTemplateMatching.cs

Example 13: NextFrame

 /// <summary>
 /// Grab, decode and return the next video frame.
 /// </summary>
 /// <param name="frame">The output array that receives the decoded frame</param>
 /// <returns>True if a frame was successfully retrieved; false if no more frames are available</returns>
 public bool NextFrame(IOutputArray frame)
 {
    using (OutputArray oaFrame = frame.GetOutputArray())
    {
       return CudaInvoke.cudaVideoReaderNextFrame(_ptr, oaFrame);
    }
 }
Developer: reidblomquist, Project: emgucv, Lines of code: 7, Source file: CudaVideoReader.cs
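
A usage sketch for NextFrame. It assumes the CudaVideoReader constructor accepts a file path and that decoded frames can be received in a GpuMat; both assumptions should be checked against the Emgu CV version in use.

 // Requires: using Emgu.CV.Cuda;
 public static int CountFrames(string videoFile)
 {
    int count = 0;
    using (CudaVideoReader reader = new CudaVideoReader(videoFile))   // constructor signature assumed
    using (GpuMat frame = new GpuMat())
    {
       // NextFrame returns false once no more frames can be decoded
       while (reader.NextFrame(frame))
          count++;
    }
    return count;
 }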

Example 14: GetRectSubPix

 /// <summary>
 /// Extracts pixels from src:
 /// dst(x, y) = src(x + center.x - (width(dst)-1)*0.5, y + center.y - (height(dst)-1)*0.5)
 /// where the values of pixels at non-integer coordinates are retrieved using bilinear interpolation. Every channel of multiple-channel images is processed independently. Whereas the rectangle center must be inside the image, the whole rectangle may be partially occluded. In this case, the replication border mode is used to get pixel values beyond the image boundaries.
 /// </summary>
 /// <param name="image">Source image</param>
 /// <param name="patchSize">Size of the extracted patch.</param>
 /// <param name="patch">Extracted rectangle</param>
 /// <param name="patchType">Depth of the extracted pixels. By default, they have the same depth as <paramref name="image"/>.</param>
 /// <param name="center">Floating point coordinates of the extracted rectangle center within the source image. The center must be inside the image.</param>
 public static void GetRectSubPix(IInputArray image, Size patchSize, PointF center, IOutputArray patch, DepthType patchType = DepthType.Default)
 {
    using (InputArray iaSrc = image.GetInputArray())
    using (OutputArray oaPatch = patch.GetOutputArray())
    {
       cveGetRectSubPix(iaSrc, ref patchSize, ref center, oaPatch, patchType);
    }
 }
Developer: Warren-GH, Project: emgucv, Lines of code: 18, Source file: CvInvokeImgproc.cs
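
A usage sketch for GetRectSubPix: crop a fixed-size patch centered on a sub-pixel coordinate. The 64x64 patch size is arbitrary, and the patch depth defaults to that of the source image.

 // Requires: using Emgu.CV; using System.Drawing;
 public static Mat CropPatch(Mat image, PointF center)
 {
    Mat patch = new Mat();
    // Pixels at non-integer positions are obtained by bilinear interpolation
    CvInvoke.GetRectSubPix(image, new Size(64, 64), center, patch);
    return patch;
 }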

Example 15: DtFilter

 /// <summary>
 /// Applies the domain transform filter (an edge-preserving smoothing filter from the ximgproc module) to src,
 /// using guide as the guidance image. sigmaSpatial and sigmaColor control the spatial and range standard
 /// deviations, mode selects the filter variant (NC, IC or RF) and numIters the number of filtering iterations.
 /// </summary>
 public static void DtFilter(IInputArray guide, IInputArray src, IOutputArray dst,
    double sigmaSpatial, double sigmaColor, int mode, int numIters)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveDtFilter(iaGuide, iaSrc, oaDst, sigmaSpatial, sigmaColor, mode, numIters);
 }
Developer: Delaley, Project: emgucv, Lines of code: 8, Source file: XImgprocInvoke.cs
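
A usage sketch for DtFilter, assuming the static method above lives in a class named XImgprocInvoke (matching its source file). Using the image as its own guide gives edge-aware smoothing; the sigma values and mode 0 (assumed to correspond to OpenCV's DTF_NC, normalized convolution) are illustrative and should be verified against the installed version.

 // Requires: using Emgu.CV; plus the Emgu CV ximgproc (contrib) module
 public static Mat SmoothEdgeAware(Mat image)
 {
    Mat smoothed = new Mat();
    // sigmaSpatial = 10, sigmaColor = 30, mode = 0 (assumed DTF_NC), 3 iterations
    XImgprocInvoke.DtFilter(image, image, smoothed, 10, 30, 0, 3);
    return smoothed;
 }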


Note: The IOutputArray class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors; please refer to the corresponding project's license before distributing or using it. Do not reproduce this article without permission.