本文整理汇总了C#中IInputOutputArray类的典型用法代码示例。如果您正苦于以下问题:C# IInputOutputArray类的具体用法?C# IInputOutputArray怎么用?C# IInputOutputArray使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
IInputOutputArray类属于命名空间,在下文中一共展示了IInputOutputArray类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: FindLargestContour
/// <summary>
/// Find the contour with the largest area in an edge image and draw it onto <paramref name="result"/>.
/// </summary>
/// <param name="cannyEdges">Binary edge image (e.g. output of Canny) to extract contours from.</param>
/// <param name="result">Image the largest contour is drawn onto (red, thickness 3).</param>
/// <returns>A copy of the points of the largest contour; empty when no contour was found.</returns>
public static VectorOfPoint FindLargestContour(IInputOutputArray cannyEdges, IInputOutputArray result)
{
    int largestContourIndex = 0;
    double largestArea = 0;
    using (Mat hierarchy = new Mat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(cannyEdges, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxNone);
        // Guard: with no contours, indexing contours[0] below would fail.
        if (contours.Size == 0)
            return new VectorOfPoint();
        for (int i = 0; i < contours.Size; i++)
        {
            double area = CvInvoke.ContourArea(contours[i], false);
            if (area > largestArea)
            {
                largestArea = area;
                largestContourIndex = i; // remember the index of the largest contour seen so far
            }
        }
        // Draw the winner exactly once, after the scan (the original redrew the
        // current best on every iteration, which was redundant work).
        CvInvoke.DrawContours(result, contours, largestContourIndex, new MCvScalar(0, 0, 255), 3, LineType.EightConnected, hierarchy);
        // Copy the points out so the returned vector outlives the disposed container.
        return new VectorOfPoint(contours[largestContourIndex].ToArray());
    }
}
示例2: Calc
/// <summary>
/// Calculates a dense optical flow on the GPU.
/// </summary>
/// <param name="denseFlow">The CUDA dense optical flow object.</param>
/// <param name="i0">First input image.</param>
/// <param name="i1">Second input image of the same size and type as <paramref name="i0"/>.</param>
/// <param name="flow">Computed flow image of the same size as the inputs.</param>
/// <param name="stream">Optional CUDA stream for an asynchronous (non-blocking) call; null runs synchronously.</param>
public static void Calc(this ICudaDenseOpticalFlow denseFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow, Stream stream = null)
{
    IntPtr streamPtr = stream == null ? IntPtr.Zero : stream.Ptr;
    using (InputArray firstImage = i0.GetInputArray())
    using (InputArray secondImage = i1.GetInputArray())
    using (InputOutputArray flowArray = flow.GetInputOutputArray())
    {
        cudaDenseOpticalFlowCalc(denseFlow.DenseOpticalFlowPtr, firstImage, secondImage, flowArray, streamPtr);
    }
}
示例3: GetRedPixelMask
/// <summary>
/// Compute the red pixel mask for the given image.
/// A red pixel is a pixel where: hue &lt; 20 OR hue &gt; 160, AND saturation &gt; 10
/// (the code selects hue in [20, 160] with InRange and then inverts the mask).
/// </summary>
/// <param name="image">The color image (BGR) to find the red mask from</param>
/// <param name="mask">The red pixel mask (also used as scratch for the hue channel)</param>
private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
{
   // Mirror the storage type of the caller's mask (UMat vs Mat) for the temporaries.
   bool useUMat;
   using (InputOutputArray ia = mask.GetInputOutputArray())
      useUMat = ia.IsUMat;
   using (IImage hsv = useUMat ? (IImage)new UMat() : (IImage)new Mat())
   using (IImage s = useUMat ? (IImage)new UMat() : (IImage)new Mat())
   {
      CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
      CvInvoke.ExtractChannel(hsv, mask, 0); // channel 0 = hue, written into mask as scratch
      CvInvoke.ExtractChannel(hsv, s, 1);    // channel 1 = saturation
      //the mask for hue less than 20 or larger than 160
      using (ScalarArray lower = new ScalarArray(20))
      using (ScalarArray upper = new ScalarArray(160))
         CvInvoke.InRange(mask, lower, upper, mask); // selects NON-red hues [20,160]...
      CvInvoke.BitwiseNot(mask, mask);               // ...then invert to get red hues
      //s is the mask for saturation of at least 10, this is mainly used to filter out white pixels
      CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
      CvInvoke.BitwiseAnd(mask, s, mask, null);
   }
}
示例4: ProcessImage
/// <summary>
/// Run license-plate detection on the image, report the elapsed time on the UI,
/// show each detected plate (original stacked over filtered) with its text,
/// and outline each plate box on the input image in red.
/// </summary>
/// <param name="image">The image to detect license plates in; plate outlines are drawn onto it.</param>
private void ProcessImage(IInputOutputArray image)
{
    Stopwatch timer = Stopwatch.StartNew(); // time the detection process
    List<IInputOutputArray> plateImages = new List<IInputOutputArray>();
    List<IInputOutputArray> filteredPlateImages = new List<IInputOutputArray>();
    List<RotatedRect> plateBoxes = new List<RotatedRect>();
    List<string> words = _licensePlateDetector.DetectLicensePlate(
        image,
        plateImages,
        filteredPlateImages,
        plateBoxes);
    timer.Stop();
    processTimeLabel.Text = String.Format("License Plate Recognition time: {0} milli-seconds", timer.Elapsed.TotalMilliseconds);
    panel1.Controls.Clear();
    Point labelOrigin = new Point(10, 10);
    for (int idx = 0; idx < words.Count; idx++)
    {
        // Stack the raw plate crop on top of its filtered version for display.
        Mat stacked = new Mat();
        CvInvoke.VConcat(plateImages[idx], filteredPlateImages[idx], stacked);
        AddLabelAndImage(
            ref labelOrigin,
            String.Format("License: {0}", words[idx]),
            stacked);
        // Outline the detected plate on the source image.
        Point[] corners = Array.ConvertAll(plateBoxes[idx].GetVertices(), Point.Round);
        using (VectorOfPoint polygon = new VectorOfPoint(corners))
        {
            CvInvoke.Polylines(image, polygon, true, new Bgr(Color.Red).MCvScalar, 2);
        }
    }
}
示例5: Calc
/// <summary>
/// Calculates an optical flow.
/// </summary>
/// <param name="opticalFlow">The dense optical flow object</param>
/// <param name="i0">First 8-bit single-channel input image.</param>
/// <param name="i1">Second input image of the same size and type as <paramref name="i0"/>.</param>
/// <param name="flow">Computed flow image that has the same size as the inputs and type CV_32FC2</param>
public static void Calc(this IDenseOpticalFlow opticalFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow)
{
    using (InputArray firstImage = i0.GetInputArray())
    using (InputArray secondImage = i1.GetInputArray())
    using (InputOutputArray flowArray = flow.GetInputOutputArray())
    {
        CvInvoke.cveDenseOpticalFlowCalc(opticalFlow.DenseOpticalFlowPtr, firstImage, secondImage, flowArray);
    }
}
示例6: Calc
/// <summary>
/// Calculates a sparse optical flow.
/// </summary>
/// <param name="sparseFlow">The sparse optical flow</param>
/// <param name="prevImg">First input image.</param>
/// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
/// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
/// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
/// <param name="status">Output status vector; 1 where the flow for the corresponding feature was found, 0 otherwise.</param>
/// <param name="err">Optional output vector of per-point error response (inverse confidence).</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public static void Calc(this ICudaSparseOpticalFlow sparseFlow, IInputArray prevImg, IInputArray nextImg, IInputArray prevPts, IInputOutputArray nextPts, IOutputArray status = null, IOutputArray err = null, Stream stream = null)
{
    IntPtr streamPtr = stream == null ? IntPtr.Zero : stream.Ptr;
    using (InputArray prevImage = prevImg.GetInputArray())
    using (InputArray nextImage = nextImg.GetInputArray())
    using (InputArray prevPoints = prevPts.GetInputArray())
    using (InputOutputArray nextPoints = nextPts.GetInputOutputArray())
    using (OutputArray statusArray = status == null ? OutputArray.GetEmpty() : status.GetOutputArray())
    using (OutputArray errArray = err == null ? OutputArray.GetEmpty() : err.GetOutputArray())
    {
        cudaSparseOpticalFlowCalc(
            sparseFlow.SparseOpticalFlowPtr,
            prevImage, nextImage, prevPoints, nextPoints,
            statusArray, errArray, streamPtr);
    }
}
示例7: DrawKeypoints
/// <summary>
/// Draw the keypoints found on the image.
/// </summary>
/// <param name="image">The image</param>
/// <param name="keypoints">The keypoints to be drawn</param>
/// <param name="outImage">The image with the keypoints drawn</param>
/// <param name="color">The color used to draw the keypoints</param>
/// <param name="type">The drawing type</param>
public static void DrawKeypoints(
   IInputArray image,
   VectorOfKeyPoint keypoints,
   IInputOutputArray outImage,
   Bgr color,
   Features2DToolbox.KeypointDrawType type)
{
   // The native call takes the color by ref, so it needs a mutable local.
   MCvScalar scalar = color.MCvScalar;
   using (InputArray sourceArray = image.GetInputArray())
   using (InputOutputArray targetArray = outImage.GetInputOutputArray())
   {
      CvInvoke.drawKeypoints(sourceArray, keypoints, targetArray, ref scalar, type);
   }
}
示例8: DrawAxis
/// <summary>
/// Given the pose estimation of a marker or board, draws the axis of the world coordinate
/// system (the system centered on the marker/board). Useful for debugging purposes.
/// </summary>
/// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
/// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
/// <param name="distCoeffs">vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
/// <param name="rvec">rotation vector of the coordinate system that will be drawn.</param>
/// <param name="tvec">translation vector of the coordinate system that will be drawn.</param>
/// <param name="length">length of the painted axis in the same unit as tvec (usually meters)</param>
public static void DrawAxis(
   IInputOutputArray image, IInputArray cameraMatrix, IInputArray distCoeffs,
   IInputArray rvec, IInputArray tvec, float length)
{
   using (InputOutputArray targetImage = image.GetInputOutputArray())
   using (InputArray camera = cameraMatrix.GetInputArray())
   using (InputArray distortion = distCoeffs.GetInputArray())
   using (InputArray rotation = rvec.GetInputArray())
   using (InputArray translation = tvec.GetInputArray())
      cveArucoDrawAxis(targetImage, camera, distortion, rotation, translation, length);
}
示例9: DrawCircle
/// <summary>
/// Draw a circle on the image.
/// </summary>
/// <param name="image">The image to draw on.</param>
/// <param name="center">Center of the circle.</param>
/// <param name="radius">Radius of the circle.</param>
/// <param name="color">Color of the circle.</param>
/// <param name="thickness">Line thickness; defaults to 1.</param>
/// <param name="lineType">Type of the circle boundary.</param>
/// <param name="shift">Number of fractional bits in the center coordinates and radius.</param>
public static void DrawCircle(IInputOutputArray image,
                              Point center,
                              int radius,
                              MCvScalar color,
                              int thickness = 1,
                              LineType lineType = LineType.EightConnected,
                              int shift = 0)
{
    using (InputOutputArray target = image.GetInputOutputArray())
        cveCircle(target, ref center, radius, ref color, thickness, lineType, shift);
}
示例10: DrawLine
/// <summary>
/// Draw a line segment on the image.
/// </summary>
/// <param name="image">The image to draw on.</param>
/// <param name="start">First endpoint of the line.</param>
/// <param name="end">Second endpoint of the line.</param>
/// <param name="color">Color of the line.</param>
/// <param name="thickness">Line thickness; defaults to 1.</param>
/// <param name="lineType">Type of the line.</param>
/// <param name="shift">Number of fractional bits in the point coordinates.</param>
public static void DrawLine(IInputOutputArray image,
                            Point start,
                            Point end,
                            MCvScalar color,
                            int thickness = 1,
                            LineType lineType = LineType.EightConnected,
                            int shift = 0)
{
    using (InputOutputArray target = image.GetInputOutputArray())
        cveLine(target, ref start, ref end, ref color, thickness, lineType, shift);
}
示例11: FindRectangle
/// <summary>
/// Find contours in the edge image, draw each contour's minimum-area bounding
/// rectangle onto <paramref name="result"/> in red, and return the contours.
/// </summary>
/// <param name="cannyEdges">Binary edge image to extract contours from.</param>
/// <param name="result">Image the bounding rectangles are drawn onto.</param>
/// <param name="areaSize">Kept for backward compatibility; previously gated a
/// rectangle classification whose outcome was never used.</param>
/// <returns>The found contours. The caller owns the returned vector and is
/// responsible for disposing it.</returns>
public static VectorOfVectorOfPoint FindRectangle(IInputOutputArray cannyEdges, IInputOutputArray result, int areaSize = 250)
{
    // BUGFIX: the original wrapped `contours` in a using block and returned it
    // from inside that block, handing the caller an already-disposed vector.
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
    int count = contours.Size;
    for (int i = 0; i < count; i++)
    {
        // contours[i] yields a fresh vector; dispose it after use.
        using (VectorOfPoint contour = contours[i])
        {
            var rect = CvInvoke.MinAreaRect(contour).MinAreaRect();
            CvInvoke.Rectangle(result, rect, new MCvScalar(0, 0, 255), 3);
        }
        // The original also ran ApproxPolyDP plus an [80,100]-degree angle check
        // per contour, but the resulting `isRectangle` flag was never consumed
        // (its only user was commented out), so that dead computation is removed.
    }
    return contours;
}
示例12: DrawEllipse
/// <summary>
/// Draw the ellipse described by a rotated rectangle onto the image.
/// </summary>
/// <param name="image">The image to draw on.</param>
/// <param name="box">The rotated rectangle whose inscribed ellipse is drawn.</param>
/// <param name="color">Color of the ellipse.</param>
/// <param name="thickness">Line thickness; defaults to 1.</param>
/// <param name="lineType">Type of the ellipse boundary.</param>
/// <param name="shift">Number of fractional bits in the coordinates.</param>
public static void DrawEllipse(IInputOutputArray image,
                               RotatedRect box,
                               MCvScalar color,
                               int thickness = 1,
                               LineType lineType = LineType.EightConnected,
                               int shift = 0)
{
    // NOTE(review): the axes are built by swapping the box's height and width
    // (halved) — presumably to match the convention of the DrawEllipse overload
    // being delegated to; confirm against that overload before changing.
    Size halfAxes = new Size(
        (int)Math.Round(box.Size.Height * 0.5F),
        (int)Math.Round(box.Size.Width * 0.5F));
    Point ellipseCenter = Point.Round(box.Center);
    DrawEllipse(image, ellipseCenter, halfAxes, box.Angle, 0.0D, 360.0D, color, thickness, lineType, shift);
}
示例13: GrabCut
/// <summary>
/// The grab cut algorithm for segmentation
/// </summary>
/// <param name="img">The 8-bit 3-channel image to be segmented</param>
/// <param name="mask">Input/output 8-bit single-channel mask, initialized by the function
/// when mode is set to GC_INIT_WITH_RECT. Element values:
/// 0 (GC_BGD) obvious background pixel;
/// 1 (GC_FGD) obvious foreground (object) pixel;
/// 2 (GC_PR_BGR) possible background pixel;
/// 3 (GC_PR_FGD) possible foreground pixel.
///</param>
/// <param name="rect">The rectangle to initialize the segmentation</param>
/// <param name="bgdModel">
/// Temporary array for the background model. Do not modify it while you are
/// processing the same image.
/// </param>
/// <param name="fgdModel">
/// Temporary array for the foreground model. Do not modify it while you are
/// processing the same image.
/// </param>
/// <param name="iterCount">The number of iterations</param>
/// <param name="type">The initialization type</param>
public static void GrabCut(
   IInputArray img,
   IInputOutputArray mask,
   Rectangle rect,
   IInputOutputArray bgdModel,
   IInputOutputArray fgdModel,
   int iterCount,
   CvEnum.GrabcutInitType type)
{
   using (InputArray imageArray = img.GetInputArray())
   using (InputOutputArray maskArray = mask == null ? InputOutputArray.GetEmpty() : mask.GetInputOutputArray())
   using (InputOutputArray backgroundModel = bgdModel.GetInputOutputArray())
   using (InputOutputArray foregroundModel = fgdModel.GetInputOutputArray())
   {
      cveGrabCut(imageArray, maskArray, ref rect, backgroundModel, foregroundModel, iterCount, type);
   }
}
示例14: DrawMatches
/// <summary>
/// Draw the matched keypoints between the model image and the observed image.
/// </summary>
/// <param name="modelImage">The model image</param>
/// <param name="modelKeypoints">The keypoints in the model image</param>
/// <param name="observerdImage">The observed image</param>
/// <param name="observedKeyPoints">The keypoints in the observed image</param>
/// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
/// <param name="result">The image where model and observed image are displayed side by side. Matches are drawn as indicated by the flag</param>
/// <param name="matchColor">The color for the match correspondence lines</param>
/// <param name="singlePointColor">The color for highlighting the keypoints</param>
/// <param name="mask">The mask for the matches. Use null for all matches.</param>
/// <param name="flags">The drawing type</param>
public static void DrawMatches(
   IInputArray modelImage, VectorOfKeyPoint modelKeypoints,
   IInputArray observerdImage, VectorOfKeyPoint observedKeyPoints,
   VectorOfVectorOfDMatch matches,
   IInputOutputArray result,
   MCvScalar matchColor, MCvScalar singlePointColor,
   IInputArray mask = null,
   KeypointDrawType flags = KeypointDrawType.Default)
{
   using (InputArray modelArray = modelImage.GetInputArray())
   using (InputArray observedArray = observerdImage.GetInputArray())
   using (InputOutputArray resultArray = result.GetInputOutputArray())
   using (InputArray maskArray = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
   {
      // Note: the native call takes the observed image/keypoints first,
      // then the model image/keypoints — the opposite of this method's
      // parameter order. Preserved as-is.
      CvInvoke.drawMatchedFeatures(observedArray, observedKeyPoints, modelArray,
         modelKeypoints, matches, resultArray, ref matchColor, ref singlePointColor, maskArray, flags);
   }
}
示例15: DrawContours
/// <summary>
/// Draw contour outlines or filled contours on the image.
/// </summary>
/// <param name="image">The image to draw on.</param>
/// <param name="contours">All the input contours.</param>
/// <param name="contourIdx">Index of the contour to draw; a negative value draws all contours.</param>
/// <param name="color">Color of the contours.</param>
/// <param name="thickness">Line thickness; defaults to 1.</param>
/// <param name="lineType">Type of the contour lines.</param>
/// <param name="hierarchy">Optional contour hierarchy; null draws without hierarchy information.</param>
/// <param name="maxLevel">Maximal level of drawn contours when a hierarchy is supplied.</param>
/// <param name="offset">Offset applied to every contour point before drawing.</param>
public static void DrawContours(IInputOutputArray image,
                                IInputArray contours,
                                int contourIdx,
                                MCvScalar color,
                                int thickness = 1,
                                LineType lineType = LineType.EightConnected,
                                IInputArray hierarchy = null,
                                int maxLevel = int.MaxValue,
                                Point offset = default(Point))
{
    using (InputOutputArray targetArray = image.GetInputOutputArray())
    using (InputArray contoursArray = contours.GetInputArray())
    using (InputArray hierarchyArray = hierarchy == null ? EmptyArray<InputArray>.Value : hierarchy.GetInputArray())
    {
        cveDrawContours(targetArray, contoursArray, contourIdx, ref color, thickness, lineType, hierarchyArray, maxLevel, ref offset);
    }
}