本文整理汇总了C#中IInputOutputArray.GetInputOutputArray方法的典型用法代码示例。如果您正苦于以下问题:C# IInputOutputArray.GetInputOutputArray方法的具体用法?C# IInputOutputArray.GetInputOutputArray怎么用?C# IInputOutputArray.GetInputOutputArray使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类IInputOutputArray
的用法示例。
在下文中一共展示了IInputOutputArray.GetInputOutputArray方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Calc
/// <summary>
/// Calculates a dense optical flow using the CUDA implementation.
/// </summary>
/// <param name="denseFlow">The CUDA dense optical flow object</param>
/// <param name="i0">First input image.</param>
/// <param name="i1">Second input image (presumably same size and type as <paramref name="i0"/> — dictated by the native implementation).</param>
/// <param name="flow">Computed flow image.</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public static void Calc(this ICudaDenseOpticalFlow denseFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow, Stream stream = null)
{
    // Resolve the optional CUDA stream up front; IntPtr.Zero means "run synchronously".
    IntPtr streamPtr = (stream == null) ? IntPtr.Zero : stream.Ptr;
    using (InputArray firstFrame = i0.GetInputArray())
    using (InputArray secondFrame = i1.GetInputArray())
    using (InputOutputArray flowArray = flow.GetInputOutputArray())
    {
        cudaDenseOpticalFlowCalc(denseFlow.DenseOpticalFlowPtr, firstFrame, secondFrame, flowArray, streamPtr);
    }
}
示例2: GetRedPixelMask
/// <summary>
/// Compute the red pixel mask for the given image.
/// A red pixel is a pixel where: 20 < hue < 160 AND saturation > 10
/// </summary>
/// <param name="image">The color image to find red mask from</param>
/// <param name="mask">The red pixel mask</param>
/// <summary>
/// Compute the red pixel mask for the given image.
/// A red pixel is a pixel where: hue &lt; 20 OR hue &gt; 160, AND saturation &gt; 10
/// (the 20..160 in-range hue mask is inverted below, selecting the red wrap-around band).
/// </summary>
/// <param name="image">The color image to find red mask from</param>
/// <param name="mask">The red pixel mask</param>
private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
{
    // Match the backing storage of the destination mask (UMat vs Mat) for the temporaries.
    bool isUMat;
    using (InputOutputArray maskArray = mask.GetInputOutputArray())
    {
        isUMat = maskArray.IsUMat;
    }

    using (IImage hsvImage = isUMat ? (IImage)new UMat() : (IImage)new Mat())
    using (IImage saturation = isUMat ? (IImage)new UMat() : (IImage)new Mat())
    {
        CvInvoke.CvtColor(image, hsvImage, ColorConversion.Bgr2Hsv);
        // Channel 0 (hue) goes straight into the output mask; channel 1 is saturation.
        CvInvoke.ExtractChannel(hsvImage, mask, 0);
        CvInvoke.ExtractChannel(hsvImage, saturation, 1);

        // Build the "hue between 20 and 160" mask in place, then invert it:
        // red hues live below 20 or above 160.
        using (ScalarArray hueLower = new ScalarArray(20))
        using (ScalarArray hueUpper = new ScalarArray(160))
        {
            CvInvoke.InRange(mask, hueLower, hueUpper, mask);
        }
        CvInvoke.BitwiseNot(mask, mask);

        // Saturation of at least 10 — mainly filters out white/near-grey pixels.
        CvInvoke.Threshold(saturation, saturation, 10, 255, ThresholdType.Binary);
        CvInvoke.BitwiseAnd(mask, saturation, mask, null);
    }
}
示例3: Calc
/// <summary>
/// Calculates an optical flow.
/// </summary>
/// <param name="i0">First 8-bit single-channel input image.</param>
/// <param name="i1">Second input image of the same size and the same type as prev.</param>
/// <param name="flow">Computed flow image that has the same size as prev and type CV_32FC2 </param>
/// <param name="opticalFlow">The dense optical flow object</param>
/// <summary>
/// Calculates an optical flow.
/// </summary>
/// <param name="opticalFlow">The dense optical flow object</param>
/// <param name="i0">First 8-bit single-channel input image.</param>
/// <param name="i1">Second input image of the same size and the same type as <paramref name="i0"/>.</param>
/// <param name="flow">Computed flow image that has the same size as the input and type CV_32FC2.</param>
public static void Calc(this IDenseOpticalFlow opticalFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow)
{
    using (InputArray firstFrame = i0.GetInputArray())
    using (InputArray secondFrame = i1.GetInputArray())
    using (InputOutputArray flowArray = flow.GetInputOutputArray())
    {
        CvInvoke.cveDenseOpticalFlowCalc(opticalFlow.DenseOpticalFlowPtr, firstFrame, secondFrame, flowArray);
    }
}
示例4: Calc
/// <summary>
/// Calculates a sparse optical flow.
/// </summary>
/// <param name="sparseFlow">The sparse optical flow</param>
/// <param name="prevImg">First input image.</param>
/// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
/// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
/// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
/// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found. Otherwise, it is set to 0.</param>
/// <param name="err">Optional output vector that contains error response for each point (inverse confidence).</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
/// <summary>
/// Calculates a sparse optical flow.
/// </summary>
/// <param name="sparseFlow">The sparse optical flow</param>
/// <param name="prevImg">First input image.</param>
/// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
/// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
/// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
/// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found. Otherwise, it is set to 0.</param>
/// <param name="err">Optional output vector that contains error response for each point (inverse confidence).</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public static void Calc(this ICudaSparseOpticalFlow sparseFlow, IInputArray prevImg, IInputArray nextImg, IInputArray prevPts, IInputOutputArray nextPts, IOutputArray status = null, IOutputArray err = null, Stream stream = null)
{
    // Null stream => synchronous (blocking) call; null status/err => empty output arrays.
    IntPtr streamPtr = (stream == null) ? IntPtr.Zero : stream.Ptr;
    using (InputArray previousFrame = prevImg.GetInputArray())
    using (InputArray nextFrame = nextImg.GetInputArray())
    using (InputArray previousPoints = prevPts.GetInputArray())
    using (InputOutputArray nextPoints = nextPts.GetInputOutputArray())
    using (OutputArray statusArray = status?.GetOutputArray() ?? OutputArray.GetEmpty())
    using (OutputArray errorArray = err?.GetOutputArray() ?? OutputArray.GetEmpty())
    {
        cudaSparseOpticalFlowCalc(
            sparseFlow.SparseOpticalFlowPtr,
            previousFrame, nextFrame, previousPoints, nextPoints,
            statusArray, errorArray, streamPtr);
    }
}
示例5: DrawKeypoints
/// <summary>
/// Draw the keypoints found on the image.
/// </summary>
/// <param name="image">The image</param>
/// <param name="keypoints">The keypoints to be drawn</param>
/// <param name="color">The color used to draw the keypoints</param>
/// <param name="type">The drawing type</param>
/// <param name="outImage">The image with the keypoints drawn</param>
/// <summary>
/// Draw the keypoints found on the image.
/// </summary>
/// <param name="image">The image</param>
/// <param name="keypoints">The keypoints to be drawn</param>
/// <param name="outImage">The image with the keypoints drawn</param>
/// <param name="color">The color used to draw the keypoints</param>
/// <param name="type">The drawing type</param>
public static void DrawKeypoints(
    IInputArray image,
    VectorOfKeyPoint keypoints,
    IInputOutputArray outImage,
    Bgr color,
    Features2DToolbox.KeypointDrawType type)
{
    using (InputArray sourceImage = image.GetInputArray())
    using (InputOutputArray destinationImage = outImage.GetInputOutputArray())
    {
        // The native call takes the scalar by reference, so unwrap it into a local first.
        MCvScalar drawColor = color.MCvScalar;
        CvInvoke.drawKeypoints(sourceImage, keypoints, destinationImage, ref drawColor, type);
    }
}
示例6: DrawCircle
/// <summary>
/// Draws a circle on the given image.
/// </summary>
/// <param name="image">The image to draw on</param>
/// <param name="center">The center of the circle</param>
/// <param name="radius">The radius of the circle</param>
/// <param name="color">The color of the circle</param>
/// <param name="thickness">Thickness of the circle outline; negative values draw a filled circle (per the native cveCircle convention — confirm against the native binding).</param>
/// <param name="lineType">Type of the circle boundary line</param>
/// <param name="shift">Number of fractional bits in the center coordinates and radius value</param>
public static void DrawCircle(IInputOutputArray image,
    Point center,
    int radius,
    MCvScalar color,
    int thickness = 1,
    LineType lineType = LineType.EightConnected,
    int shift = 0)
{
    using (InputOutputArray target = image.GetInputOutputArray())
        cveCircle(target, ref center, radius, ref color, thickness, lineType, shift);
}
示例7: GrabCut
/// <summary>
/// The grab cut algorithm for segmentation
/// </summary>
/// <param name="img">The 8-bit 3-channel image to be segmented</param>
/// <param name="mask">Input/output 8-bit single-channel mask. The mask is initialized by the function
/// when mode is set to GC_INIT_WITH_RECT. Its elements may have one of following values:
/// 0 (GC_BGD) defines an obvious background pixels.
/// 1 (GC_FGD) defines an obvious foreground (object) pixel.
/// 2 (GC_PR_BGR) defines a possible background pixel.
/// 3 (GC_PR_FGD) defines a possible foreground pixel.
///</param>
/// <param name="rect">The rectangle to initialize the segmentation</param>
/// <param name="bgdModel">
/// Temporary array for the background model. Do not modify it while you are
/// processing the same image.
/// </param>
/// <param name="fgdModel">
/// Temporary arrays for the foreground model. Do not modify it while you are
/// processing the same image.
/// </param>
/// <param name="iterCount">The number of iterations</param>
/// <param name="type">The initialization type</param>
/// <summary>
/// The grab cut algorithm for segmentation
/// </summary>
/// <param name="img">The 8-bit 3-channel image to be segmented</param>
/// <param name="mask">Input/output 8-bit single-channel mask. The mask is initialized by the function
/// when mode is set to GC_INIT_WITH_RECT. Its elements may have one of following values:
/// 0 (GC_BGD) defines an obvious background pixel.
/// 1 (GC_FGD) defines an obvious foreground (object) pixel.
/// 2 (GC_PR_BGD) defines a possible background pixel.
/// 3 (GC_PR_FGD) defines a possible foreground pixel.
/// May be null, in which case an empty mask is passed to the native call.
/// </param>
/// <param name="rect">The rectangle to initialize the segmentation</param>
/// <param name="bgdModel">
/// Temporary array for the background model. Do not modify it while you are
/// processing the same image.
/// </param>
/// <param name="fgdModel">
/// Temporary arrays for the foreground model. Do not modify it while you are
/// processing the same image.
/// </param>
/// <param name="iterCount">The number of iterations</param>
/// <param name="type">The initialization type</param>
public static void GrabCut(
    IInputArray img,
    IInputOutputArray mask,
    Rectangle rect,
    IInputOutputArray bgdModel,
    IInputOutputArray fgdModel,
    int iterCount,
    CvEnum.GrabcutInitType type)
{
    using (InputArray sourceImage = img.GetInputArray())
    using (InputOutputArray maskArray = mask != null ? mask.GetInputOutputArray() : InputOutputArray.GetEmpty())
    using (InputOutputArray backgroundModel = bgdModel.GetInputOutputArray())
    using (InputOutputArray foregroundModel = fgdModel.GetInputOutputArray())
    {
        cveGrabCut(sourceImage, maskArray, ref rect, backgroundModel, foregroundModel, iterCount, type);
    }
}
示例8: DrawEllipse
/// <summary>
/// Draws an ellipse (or elliptic arc) on the given image.
/// </summary>
/// <param name="image">The image to draw on</param>
/// <param name="center">The center of the ellipse</param>
/// <param name="axes">Half of the size of the ellipse axes</param>
/// <param name="angle">Rotation angle of the ellipse</param>
/// <param name="startAngle">Starting angle of the elliptic arc</param>
/// <param name="endAngle">Ending angle of the elliptic arc</param>
/// <param name="color">The color of the ellipse</param>
/// <param name="thickness">Thickness of the ellipse boundary; negative values draw a filled ellipse (per the native cveEllipse convention — confirm against the native binding).</param>
/// <param name="lineType">Type of the ellipse boundary line</param>
/// <param name="shift">Number of fractional bits in the coordinates and axes values</param>
public static void DrawEllipse(IInputOutputArray image,
    Point center,
    Size axes,
    double angle,
    double startAngle,
    double endAngle,
    MCvScalar color,
    int thickness = 1,
    LineType lineType = LineType.EightConnected,
    int shift = 0)
{
    using (InputOutputArray target = image.GetInputOutputArray())
        cveEllipse(target, ref center, ref axes, angle, startAngle, endAngle, ref color, thickness, lineType, shift);
}
示例9: DrawMatches
/// <summary>
/// Draw the matched keypoints between the model image and the observered image.
/// </summary>
/// <param name="modelImage">The model image</param>
/// <param name="modelKeypoints">The keypoints in the model image</param>
/// <param name="observerdImage">The observed image</param>
/// <param name="observedKeyPoints">The keypoints in the observed image</param>
/// <param name="matchColor">The color for the match correspondence lines</param>
/// <param name="singlePointColor">The color for highlighting the keypoints</param>
/// <param name="mask">The mask for the matches. Use null for all matches.</param>
/// <param name="flags">The drawing type</param>
/// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
/// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
/// <summary>
/// Draw the matched keypoints between the model image and the observed image.
/// </summary>
/// <param name="modelImage">The model image</param>
/// <param name="modelKeypoints">The keypoints in the model image</param>
/// <param name="observerdImage">The observed image</param>
/// <param name="observedKeyPoints">The keypoints in the observed image</param>
/// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
/// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
/// <param name="matchColor">The color for the match correspondence lines</param>
/// <param name="singlePointColor">The color for highlighting the keypoints</param>
/// <param name="mask">The mask for the matches. Use null for all matches.</param>
/// <param name="flags">The drawing type</param>
public static void DrawMatches(
    IInputArray modelImage, VectorOfKeyPoint modelKeypoints,
    IInputArray observerdImage, VectorOfKeyPoint observedKeyPoints,
    VectorOfVectorOfDMatch matches,
    IInputOutputArray result,
    MCvScalar matchColor, MCvScalar singlePointColor,
    IInputArray mask = null,
    KeypointDrawType flags = KeypointDrawType.Default)
{
    using (InputArray modelArray = modelImage.GetInputArray())
    using (InputArray observedArray = observerdImage.GetInputArray())
    using (InputOutputArray resultArray = result.GetInputOutputArray())
    using (InputArray maskArray = mask != null ? mask.GetInputArray() : InputArray.GetEmpty())
    {
        // Note: the native call takes the observed image/keypoints first, then the model's.
        CvInvoke.drawMatchedFeatures(
            observedArray, observedKeyPoints,
            modelArray, modelKeypoints,
            matches, resultArray,
            ref matchColor, ref singlePointColor,
            maskArray, flags);
    }
}
示例10: DrawContours
/// <summary>
/// Draws contour outlines (or filled contours) on the given image.
/// </summary>
/// <param name="image">The image to draw on</param>
/// <param name="contours">All the input contours</param>
/// <param name="contourIdx">The index of the contour to draw; a negative value is forwarded to the native call as-is (conventionally meaning "all contours" — confirm against the native binding)</param>
/// <param name="color">The color of the contours</param>
/// <param name="thickness">Thickness of the contour lines</param>
/// <param name="lineType">The type of the contour line</param>
/// <param name="hierarchy">Optional contour hierarchy; when null an empty array is passed instead</param>
/// <param name="maxLevel">Maximal level for drawn contours</param>
/// <param name="offset">Optional offset applied to every contour point</param>
public static void DrawContours(IInputOutputArray image,
    IInputArray contours,
    int contourIdx,
    MCvScalar color,
    int thickness = 1,
    LineType lineType = LineType.EightConnected,
    IInputArray hierarchy = null,
    int maxLevel = int.MaxValue,
    Point offset = default(Point))
{
    using (InputOutputArray target = image.GetInputOutputArray())
    using (InputArray contoursArray = contours.GetInputArray())
    using (InputArray hierarchyArray = hierarchy?.GetInputArray() ?? EmptyArray<InputArray>.Value)
    {
        cveDrawContours(target, contoursArray, contourIdx, ref color, thickness, lineType, hierarchyArray, maxLevel, ref offset);
    }
}
示例11: UpdateMotionHistory
/// <summary>
/// Updates the motion history image as following:
/// mhi(x,y)=timestamp if silhouette(x,y)!=0
/// 0 if silhouette(x,y)=0 and mhi(x,y)<timestamp-duration
/// mhi(x,y) otherwise
/// That is, MHI pixels where motion occurs are set to the current timestamp, while the pixels where motion happened far ago are cleared.
/// </summary>
/// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs. </param>
/// <param name="mhi">Motion history image, that is updated by the function (single-channel, 32-bit floating-point) </param>
/// <param name="timestamp">Current time in milliseconds or other units. </param>
/// <param name="duration">Maximal duration of motion track in the same units as timestamp. </param>
/// <summary>
/// Updates the motion history image as following:
/// mhi(x,y)=timestamp if silhouette(x,y)!=0
/// 0 if silhouette(x,y)=0 and mhi(x,y) &lt; timestamp-duration
/// mhi(x,y) otherwise
/// That is, MHI pixels where motion occurs are set to the current timestamp, while the pixels where motion happened far ago are cleared.
/// </summary>
/// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs. </param>
/// <param name="mhi">Motion history image, that is updated by the function (single-channel, 32-bit floating-point) </param>
/// <param name="timestamp">Current time in milliseconds or other units. </param>
/// <param name="duration">Maximal duration of motion track in the same units as timestamp. </param>
public static void UpdateMotionHistory(IInputArray silhouette, IInputOutputArray mhi, double timestamp, double duration)
{
    using (InputArray silhouetteArray = silhouette.GetInputArray())
    using (InputOutputArray historyArray = mhi.GetInputOutputArray())
    {
        cveUpdateMotionHistory(silhouetteArray, historyArray, timestamp, duration);
    }
}
示例12: CalcOpticalFlowFarneback
/// <summary>
/// Computes dense optical flow using Gunnar Farneback's algorithm
/// </summary>
/// <param name="prev0">The first 8-bit single-channel input image</param>
/// <param name="next0">The second input image of the same size and the same type as prevImg</param>
/// <param name="flow">The computed flow image; will have the same size as prevImg and type CV 32FC2</param>
/// <param name="pyrScale">Specifies the image scale (!1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous</param>
/// <param name="levels">The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used</param>
/// <param name="winSize">The averaging window size; The larger values increase the algorithm robustness to image noise and give more chances for fast motion detection, but yield more blurred motion field</param>
/// <param name="iterations">The number of iterations the algorithm does at each pyramid level</param>
/// <param name="polyN">Size of the pixel neighborhood used to find polynomial expansion in each pixel. The larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field. Typically, poly n=5 or 7</param>
/// <param name="polySigma">Standard deviation of the Gaussian that is used to smooth derivatives that are used as a basis for the polynomial expansion. For poly n=5 you can set poly sigma=1.1, for poly n=7 a good value would be poly sigma=1.5</param>
/// <param name="flags">The operation flags</param>
/// <summary>
/// Computes dense optical flow using Gunnar Farneback's algorithm
/// </summary>
/// <param name="prev0">The first 8-bit single-channel input image</param>
/// <param name="next0">The second input image of the same size and the same type as <paramref name="prev0"/></param>
/// <param name="flow">The computed flow image; will have the same size as the input and type CV_32FC2</param>
/// <param name="pyrScale">Specifies the image scale (&lt;1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous</param>
/// <param name="levels">The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used</param>
/// <param name="winSize">The averaging window size; The larger values increase the algorithm robustness to image noise and give more chances for fast motion detection, but yield more blurred motion field</param>
/// <param name="iterations">The number of iterations the algorithm does at each pyramid level</param>
/// <param name="polyN">Size of the pixel neighborhood used to find polynomial expansion in each pixel. The larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field. Typically, polyN=5 or 7</param>
/// <param name="polySigma">Standard deviation of the Gaussian that is used to smooth derivatives that are used as a basis for the polynomial expansion. For polyN=5 you can set polySigma=1.1, for polyN=7 a good value would be polySigma=1.5</param>
/// <param name="flags">The operation flags</param>
public static void CalcOpticalFlowFarneback(
    IInputArray prev0,
    IInputArray next0,
    IInputOutputArray flow,
    double pyrScale,
    int levels,
    int winSize,
    int iterations,
    int polyN,
    double polySigma,
    CvEnum.OpticalflowFarnebackFlag flags)
{
    using (InputArray previousFrame = prev0.GetInputArray())
    using (InputArray nextFrame = next0.GetInputArray())
    using (InputOutputArray flowArray = flow.GetInputOutputArray())
    {
        cveCalcOpticalFlowFarneback(previousFrame, nextFrame, flowArray, pyrScale, levels, winSize, iterations, polyN, polySigma, flags);
    }
}
示例13: Rectangle
/// <summary>
/// Draws a rectangle specified by a CvRect structure
/// </summary>
/// /// <param name="img">Image</param>
/// <param name="rect">The rectangle to be drawn</param>
/// <param name="color">Line color </param>
/// <param name="thickness">Thickness of lines that make up the rectangle. Negative values make the function to draw a filled rectangle.</param>
/// <param name="lineType">Type of the line</param>
/// <param name="shift">Number of fractional bits in the point coordinates</param>
/// <summary>
/// Draws a rectangle specified by a CvRect structure
/// </summary>
/// <param name="img">Image</param>
/// <param name="rect">The rectangle to be drawn</param>
/// <param name="color">Line color </param>
/// <param name="thickness">Thickness of lines that make up the rectangle. Negative values make the function to draw a filled rectangle.</param>
/// <param name="lineType">Type of the line</param>
/// <param name="shift">Number of fractional bits in the point coordinates</param>
public static void Rectangle(IInputOutputArray img, Rectangle rect, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
    using (InputOutputArray target = img.GetInputOutputArray())
    {
        cveRectangle(target, ref rect, ref color, thickness, lineType, shift);
    }
}
示例14: ArrowedLine
/// <summary>
/// Draws a arrow segment pointing from the first point to the second one.
/// </summary>
/// <param name="img">Image</param>
/// <param name="pt1">The point the arrow starts from.</param>
/// <param name="pt2">The point the arrow points to.</param>
/// <param name="color">Line color.</param>
/// <param name="thickness">Line thickness.</param>
/// <param name="lineType">Type of the line.</param>
/// <param name="shift">Number of fractional bits in the point coordinates.</param>
/// <param name="tipLength">The length of the arrow tip in relation to the arrow length</param>
/// <summary>
/// Draws an arrow segment pointing from the first point to the second one.
/// </summary>
/// <param name="img">Image</param>
/// <param name="pt1">The point the arrow starts from.</param>
/// <param name="pt2">The point the arrow points to.</param>
/// <param name="color">Line color.</param>
/// <param name="thickness">Line thickness.</param>
/// <param name="lineType">Type of the line.</param>
/// <param name="shift">Number of fractional bits in the point coordinates.</param>
/// <param name="tipLength">The length of the arrow tip in relation to the arrow length</param>
public static void ArrowedLine(IInputOutputArray img, Point pt1, Point pt2, MCvScalar color, int thickness = 1,
    CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0, double tipLength = 0.1)
{
    using (InputOutputArray target = img.GetInputOutputArray())
        cveArrowedLine(target, ref pt1, ref pt2, ref color, thickness, lineType, shift, tipLength);
}
示例15: FloodFill
/// <summary>
/// Fills a connected component with given color.
/// </summary>
/// <param name="src">Input 1- or 3-channel, 8-bit or floating-point image. It is modified by the function unless CV_FLOODFILL_MASK_ONLY flag is set.</param>
/// <param name="seedPoint">The starting point.</param>
/// <param name="newVal">New value of repainted domain pixels.</param>
/// <param name="loDiff">Maximal lower brightness/color difference
/// between the currently observed pixel and one of its neighbor belong to the component
/// or seed pixel to add the pixel to component.
/// In case of 8-bit color images it is packed value.</param>
/// <param name="upDiff">Maximal upper brightness/color difference
/// between the currently observed pixel and one of its neighbor belong to the component
/// or seed pixel to add the pixel to component.
/// In case of 8-bit color images it is packed value.</param>
/// <param name="flags">The operation flags.
/// Lower bits contain connectivity value, 4 (by default) or 8, used within the function.
/// Connectivity determines which neighbors of a pixel are considered.
/// Upper bits can be 0 or combination of the following flags:
/// CV_FLOODFILL_FIXED_RANGE - if set the difference between the current pixel and seed pixel is considered,
/// otherwise difference between neighbor pixels is considered (the range is floating).
/// CV_FLOODFILL_MASK_ONLY - if set, the function does not fill the image (new_val is ignored),
/// but the fills mask (that must be non-NULL in this case). </param>
/// <param name="mask">Operation mask,
/// should be singe-channel 8-bit image, 2 pixels wider and 2 pixels taller than image.
/// If not IntPtr.Zero, the function uses and updates the mask, so user takes responsibility of initializing mask content.
/// Floodfilling can't go across non-zero pixels in the mask, for example, an edge detector output can be used as a mask to stop filling at edges.
/// Or it is possible to use the same mask in multiple calls to the function to make sure the filled area do not overlap.
/// Note: because mask is larger than the filled image, pixel in mask that corresponds to (x,y) pixel in image will have coordinates (x+1,y+1).</param>
/// <param name="rect">Output parameter set by the function to the minimum bounding rectangle of the repainted domain.</param>
/// <param name="connectivity">Flood fill connectivity</param>
/// <summary>
/// Fills a connected component with given color.
/// </summary>
/// <param name="src">Input 1- or 3-channel, 8-bit or floating-point image. It is modified by the function unless CV_FLOODFILL_MASK_ONLY flag is set.</param>
/// <param name="mask">Operation mask,
/// should be single-channel 8-bit image, 2 pixels wider and 2 pixels taller than image.
/// If not null, the function uses and updates the mask, so user takes responsibility of initializing mask content.
/// Floodfilling can't go across non-zero pixels in the mask, for example, an edge detector output can be used as a mask to stop filling at edges.
/// Or it is possible to use the same mask in multiple calls to the function to make sure the filled area do not overlap.
/// Note: because mask is larger than the filled image, pixel in mask that corresponds to (x,y) pixel in image will have coordinates (x+1,y+1).</param>
/// <param name="seedPoint">The starting point.</param>
/// <param name="newVal">New value of repainted domain pixels.</param>
/// <param name="rect">Output parameter set by the function to the minimum bounding rectangle of the repainted domain.</param>
/// <param name="loDiff">Maximal lower brightness/color difference
/// between the currently observed pixel and one of its neighbor belong to the component
/// or seed pixel to add the pixel to component.
/// In case of 8-bit color images it is packed value.</param>
/// <param name="upDiff">Maximal upper brightness/color difference
/// between the currently observed pixel and one of its neighbor belong to the component
/// or seed pixel to add the pixel to component.
/// In case of 8-bit color images it is packed value.</param>
/// <param name="connectivity">Flood fill connectivity</param>
/// <param name="flags">The operation flags.
/// CV_FLOODFILL_FIXED_RANGE - if set the difference between the current pixel and seed pixel is considered,
/// otherwise difference between neighbor pixels is considered (the range is floating).
/// CV_FLOODFILL_MASK_ONLY - if set, the function does not fill the image (new_val is ignored),
/// but fills the mask (that must be non-null in this case).</param>
/// <returns>The value returned by the native flood fill (per OpenCV's floodFill, the number of repainted pixels — confirm against the native binding).</returns>
public static int FloodFill(
    IInputOutputArray src,
    IInputOutputArray mask,
    Point seedPoint,
    MCvScalar newVal,
    out Rectangle rect,
    MCvScalar loDiff,
    MCvScalar upDiff,
    CvEnum.Connectivity connectivity = CvEnum.Connectivity.FourConnected,
    CvEnum.FloodFillType flags = CvEnum.FloodFillType.Default)
{
    rect = default(Rectangle);
    // Pack connectivity (lower bits) and operation flags (upper bits) into one int.
    int combinedFlags = (int)connectivity | (int)flags;
    using (InputOutputArray sourceArray = src.GetInputOutputArray())
    using (InputOutputArray maskArray = mask != null ? mask.GetInputOutputArray() : InputOutputArray.GetEmpty())
    {
        return cveFloodFill(
            sourceArray,
            maskArray,
            ref seedPoint, ref newVal,
            ref rect,
            ref loDiff, ref upDiff, combinedFlags);
    }
}