本文整理汇总了C#中IOutputArray.GetOutputArray方法的典型用法代码示例。如果您正苦于以下问题:C# IOutputArray.GetOutputArray方法的具体用法?C# IOutputArray.GetOutputArray怎么用?C# IOutputArray.GetOutputArray使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 IOutputArray 的用法示例。
在下文中一共展示了IOutputArray.GetOutputArray方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: WarpAffine
/// <summary>
/// Applies an affine transformation to an image.
/// </summary>
/// <param name="src">Source image</param>
/// <param name="dst">Destination image</param>
/// <param name="mapMatrix">2x3 transformation matrix</param>
/// <param name="dsize">Size of the output image.</param>
/// <param name="interpMethod">Interpolation method</param>
/// <param name="warpMethod">Warp method</param>
/// <param name="borderMode">Pixel extrapolation method</param>
/// <param name="borderValue">A value used to fill outliers</param>
public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
{
   // The native function expects the interpolation and warp options OR-ed into one flag value.
   int flags = (int)interpMethod | (int)warpMethod;
   using (InputArray srcArray = src.GetInputArray())
   using (OutputArray dstArray = dst.GetOutputArray())
   using (InputArray matrixArray = mapMatrix.GetInputArray())
   {
      cveWarpAffine(srcArray, dstArray, matrixArray, ref dsize, flags, borderMode, ref borderValue);
   }
}
示例2: Compute
/// <summary>
/// Computes the disparity map for the specified stereo pair.
/// </summary>
/// <param name="matcher">The stereo matcher</param>
/// <param name="left">Left 8-bit single-channel image.</param>
/// <param name="right">Right image of the same size and the same type as the left one.</param>
/// <param name="disparity">Output disparity map, the same size as the input images. Some algorithms (e.g. StereoBM, StereoSGBM) produce a 16-bit fixed-point disparity map (4 fractional bits per value); other algorithms produce a 32-bit floating-point map.</param>
public static void Compute(this IStereoMatcher matcher, IInputArray left, IInputArray right, IOutputArray disparity)
{
   using (InputArray leftArray = left.GetInputArray())
   using (InputArray rightArray = right.GetInputArray())
   using (OutputArray disparityArray = disparity.GetOutputArray())
   {
      CvStereoMatcherCompute(matcher.StereoMatcherPtr, leftArray, rightArray, disparityArray);
   }
}
示例3: NextFrame
/// <summary>
/// Grab the next frame from the video source into the given output array.
/// </summary>
/// <param name="frame">The output array that receives the frame data.</param>
/// <returns>The boolean result of the native reader call — presumably true when a frame was successfully retrieved; confirm against the CudaInvoke documentation.</returns>
public bool NextFrame(IOutputArray frame)
{
   using (OutputArray frameArray = frame.GetOutputArray())
      return CudaInvoke.cudaVideoReaderNextFrame(_ptr, frameArray);
}
示例4: FindStereoCorrespondence
/// <summary>
/// Computes the disparity map for the input rectified stereo pair.
/// </summary>
/// <param name="left">The left single-channel, 8-bit image</param>
/// <param name="right">The right image of the same size and the same type</param>
/// <param name="disparity">The disparity map</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public void FindStereoCorrespondence(IInputArray left, IInputArray right, IOutputArray disparity, Stream stream = null)
{
   using (InputArray leftArray = left.GetInputArray())
   using (InputArray rightArray = right.GetInputArray())
   using (OutputArray disparityArray = disparity.GetOutputArray())
   {
      CudaInvoke.cudaStereoBMFindStereoCorrespondence(_ptr, leftArray, rightArray, disparityArray, stream);
   }
}
示例5: Index
/*
/// <summary>
/// Create an auto-tuned flann index
/// </summary>
/// <param name="values">A row by row matrix of descriptors</param>
/// <param name="targetPrecision">Precision desired, use 0.9 if not sure</param>
/// <param name="buildWeight">build tree time weighting factor, use 0.01 if not sure</param>
/// <param name="memoryWeight">index memory weighting factor, use 0 if not sure</param>
/// <param name="sampleFraction">what fraction of the dataset to use for autotuning, use 0.1 if not sure</param>
public Index(IInputArray values, float targetPrecision, float buildWeight, float memoryWeight, float sampleFraction)
{
using (InputArray iaValues = values.GetInputArray())
_ptr = CvFlannIndexCreateAutotuned(iaValues, targetPrecision, buildWeight, memoryWeight, sampleFraction);
}*/
#endregion
/// <summary>
/// Perform a k-nearest-neighbours (KNN) search.
/// </summary>
/// <param name="queries">A row by row matrix of descriptors to be queried for nearest neighbours</param>
/// <param name="indices">Receives the indices of the k nearest neighbours</param>
/// <param name="squareDistances">Receives the squared Euclidean distances to the neighbours</param>
/// <param name="knn">Number of nearest neighbors to search for</param>
/// <param name="checks">The number of times the tree(s) in the index should be recursively traversed. A
/// higher value gives better search precision but takes more time. If automatic configuration
/// was used when the index was created, the number of checks needed for the requested precision
/// was computed then, in which case this parameter is ignored.</param>
public void KnnSearch(IInputArray queries, IOutputArray indices, IOutputArray squareDistances, int knn, int checks)
{
   using (InputArray queriesArray = queries.GetInputArray())
   using (OutputArray indicesArray = indices.GetOutputArray())
   using (OutputArray distancesArray = squareDistances.GetOutputArray())
   {
      CvFlannIndexKnnSearch(_ptr, queriesArray, indicesArray, distancesArray, knn, checks);
   }
}
示例6: Apply
/// <summary>
/// Apply the bilateral filter to the disparity image.
/// </summary>
/// <param name="disparity">The input disparity map</param>
/// <param name="image">The image</param>
/// <param name="dst">The output disparity map; should have the same size as the input disparity map</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public void Apply(IInputArray disparity, IInputArray image, IOutputArray dst, Stream stream = null)
{
   using (InputArray disparityArray = disparity.GetInputArray())
   using (InputArray imageArray = image.GetInputArray())
   using (OutputArray dstArray = dst.GetOutputArray())
   {
      CudaInvoke.cudaDisparityBilateralFilterApply(this, disparityArray, imageArray, dstArray, stream);
   }
}
示例7: Inpaint
/// <summary>
/// Reconstructs the selected image area from the pixels near the area boundary. May be used to
/// remove dust and scratches from a scanned photo, or to remove undesirable objects from still
/// images or video.
/// </summary>
/// <param name="src">The input 8-bit 1-channel or 3-channel image</param>
/// <param name="mask">The inpainting mask, 8-bit 1-channel image; non-zero pixels mark the area to be inpainted</param>
/// <param name="dst">The output image, same format and size as the input</param>
/// <param name="inpaintRadius">The radius of the circular neighborhood of each inpainted point considered by the algorithm</param>
/// <param name="flags">The inpainting method</param>
public static void Inpaint(IInputArray src, IInputArray mask, IOutputArray dst, double inpaintRadius, CvEnum.InpaintType flags)
{
   using (InputArray srcArray = src.GetInputArray())
   using (InputArray maskArray = mask.GetInputArray())
   using (OutputArray dstArray = dst.GetOutputArray())
   {
      cveInpaint(srcArray, maskArray, dstArray, inpaintRadius, flags);
   }
}
示例8: DetectAsync
/// <summary>
/// Detect the features in the image.
/// </summary>
/// <param name="feature2DAsync">The Feature2DAsync object</param>
/// <param name="image">The image from which the features will be detected</param>
/// <param name="keypoints">The result vector of keypoints</param>
/// <param name="mask">The optional mask; pass null when no mask is needed</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public static void DetectAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IInputArray mask = null, Stream stream = null)
{
   using (InputArray imageArray = image.GetInputArray())
   using (OutputArray keypointsArray = keypoints.GetOutputArray())
   // A null mask is represented by an empty InputArray on the native side.
   using (InputArray maskArray = mask != null ? mask.GetInputArray() : InputArray.GetEmpty())
   {
      CudaInvoke.cveCudaFeature2dAsyncDetectAsync(feature2DAsync.Feature2DAsyncPtr, imageArray, keypointsArray, maskArray, stream);
   }
}
示例9: DetectAndCompute
/// <summary>
/// Detect keypoints in an image and compute the descriptors at the keypoint locations.
/// </summary>
/// <param name="image">The image</param>
/// <param name="mask">The optional mask; can be null if not needed</param>
/// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
/// <param name="descriptors">The descriptors computed from the keypoints</param>
/// <param name="useProvidedKeyPoints">If true, skip the detection phase and compute descriptors for the provided keypoints</param>
public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
{
   using (InputArray imageArray = image.GetInputArray())
   // A null mask is represented by an empty InputArray on the native side.
   using (InputArray maskArray = mask != null ? mask.GetInputArray() : InputArray.GetEmpty())
   using (OutputArray descriptorsArray = descriptors.GetOutputArray())
   {
      Feature2DInvoke.CvFeature2DDetectAndCompute(_ptr, imageArray, maskArray, keyPoints, descriptorsArray, useProvidedKeyPoints);
   }
}
示例10: Match
/// <summary>
/// Similar to cvCalcBackProjectPatch: slides through image, compares overlapped patches of size
/// w x h against templ using the specified method, and stores the comparison results in result.
/// </summary>
/// <param name="image">Image where the search is running; should be 8-bit or 32-bit floating-point</param>
/// <param name="templ">Searched template; must be no greater than the source image and of the same data type</param>
/// <param name="result">A map of comparison results; single-channel 32-bit floating-point. If image is WxH and templ is wxh then result must be (W-w+1)x(H-h+1).</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public void Match(IInputArray image, IInputArray templ, IOutputArray result, Stream stream = null)
{
   using (InputArray imageArray = image.GetInputArray())
   using (InputArray templateArray = templ.GetInputArray())
   using (OutputArray resultArray = result.GetOutputArray())
   {
      CudaInvoke.cudaTemplateMatchingMatch(_ptr, imageArray, templateArray, resultArray, stream);
   }
}
示例11: DetectMultiScale
/// <summary>
/// Detects objects of different sizes in the input image.
/// </summary>
/// <param name="image">Matrix of type CV_8U containing an image where objects should be detected.</param>
/// <param name="objects">Buffer to store detected objects (rectangles).</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public void DetectMultiScale(IInputArray image, IOutputArray objects, Stream stream = null)
{
   // This native entry point takes a raw stream pointer rather than the Stream wrapper.
   IntPtr streamPtr = stream != null ? stream.Ptr : IntPtr.Zero;
   using (InputArray imageArray = image.GetInputArray())
   using (OutputArray objectsArray = objects.GetOutputArray())
   {
      CudaInvoke.cudaCascadeClassifierDetectMultiScale(_ptr, imageArray, objectsArray, streamPtr);
   }
}
示例12: Detect
/// <summary>
/// Find the good features to track
/// </summary>
/// <param name="image">The image in which to detect corners</param>
/// <param name="corners">Output buffer that receives the detected corners</param>
/// <param name="mask">The optional mask; pass null when no mask is needed</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public void Detect(IInputArray image, IOutputArray corners, IInputArray mask = null, Stream stream = null)
{
   using (InputArray iaImage = image.GetInputArray())
   using (OutputArray oaCorners = corners.GetOutputArray())
   // BUG FIX: the original ternary was inverted - it called mask.GetInputArray() exactly
   // when mask was null (NullReferenceException) and passed an empty array when a real
   // mask WAS supplied. Corrected to match the pattern used by DetectAsync:
   // null mask -> empty InputArray; non-null mask -> mask.GetInputArray().
   using (InputArray iaMask = (mask == null ? InputArray.GetEmpty() : mask.GetInputArray()))
      CudaInvoke.cudaCornersDetectorDetect(_ptr, iaImage, oaCorners, iaMask, stream);
}
示例13: GuidedFilter
/// <summary>
/// Apply the guided filter to the source image.
/// </summary>
/// <param name="guide">The guidance image</param>
/// <param name="src">The image to be filtered</param>
/// <param name="dst">The filtered output image</param>
/// <param name="radius">Radius parameter forwarded to the native guided filter</param>
/// <param name="eps">Regularization parameter forwarded to the native guided filter</param>
/// <param name="dDepth">Depth of the output image</param>
public static void GuidedFilter(IInputArray guide, IInputArray src, IOutputArray dst, int radius, double eps,
int dDepth)
{
   using (InputArray guideArray = guide.GetInputArray())
   using (InputArray srcArray = src.GetInputArray())
   using (OutputArray dstArray = dst.GetOutputArray())
   {
      cveGuidedFilter(guideArray, srcArray, dstArray, radius, eps, dDepth);
   }
}
示例14: Predict
/// <summary>
/// Predict responses for the provided samples using the statistical model.
/// </summary>
/// <param name="model">The statistical model</param>
/// <param name="samples">The input samples</param>
/// <param name="results">Optional output array that receives the predictions; pass null to discard them</param>
/// <param name="flags">Optional flags forwarded to the native call; model-dependent — see the StatModel documentation</param>
/// <returns>The value returned by the native StatModelPredict call</returns>
public static float Predict(this IStatModel model, IInputArray samples, IOutputArray results = null, int flags = 0)
{
   using (InputArray samplesArray = samples.GetInputArray())
   // A null results argument is represented by an empty OutputArray on the native side.
   using (OutputArray resultsArray = results != null ? results.GetOutputArray() : OutputArray.GetEmpty())
   {
      return MlInvoke.StatModelPredict(model.StatModelPtr, samplesArray, resultsArray, flags);
   }
}
示例15: GetRectSubPix
/// <summary>
/// Extracts pixels from src:
/// dst(x, y) = src(x + center.x - (width(dst)-1)*0.5, y + center.y - (height(dst)-1)*0.5)
/// where pixel values at non-integer coordinates are retrieved using bilinear interpolation. Every channel of a multi-channel image is processed independently. The rectangle center must be inside the image, but the rectangle itself may be partially outside; in that case the replication border mode is used to obtain pixel values beyond the image boundaries.
/// </summary>
/// <param name="image">Source image</param>
/// <param name="patchSize">Size of the extracted patch.</param>
/// <param name="center">Floating point coordinates of the extracted rectangle center within the source image. The center must be inside the image.</param>
/// <param name="patch">Extracted rectangle</param>
/// <param name="patchType">Depth of the extracted pixels. By default, they have the same depth as <paramref name="image"/>.</param>
public static void GetRectSubPix(IInputArray image, Size patchSize, PointF center, IOutputArray patch, DepthType patchType = DepthType.Default)
{
   using (InputArray imageArray = image.GetInputArray())
   using (OutputArray patchArray = patch.GetOutputArray())
      cveGetRectSubPix(imageArray, ref patchSize, ref center, patchArray, patchType);
}