This article collects typical usage examples of the Contour.Push method in C#. If you have been wondering what exactly C# Contour.Push does, how to call it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the containing class, Contour.
The following presents 4 code examples of the Contour.Push method, sorted by popularity by default.
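Before the full examples, here is a minimal sketch of the basic pattern. In the Emgu CV 2.x API used throughout this page, Contour&lt;T&gt; is a point sequence allocated on a MemStorage, and Push appends a single vertex to the end of that sequence. The point values are arbitrary, and the usual Emgu.CV and System.Drawing namespaces are assumed:

using (MemStorage storage = new MemStorage())
{
   Contour<Point> triangle = new Contour<Point>(storage);
   triangle.Push(new Point(0, 0));   //each Push appends one vertex
   triangle.Push(new Point(10, 0));
   triangle.Push(new Point(5, 10));
} //disposing the storage releases the contour's memory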
Example 1: TestContourCreate
public void TestContourCreate()
{
   using (MemStorage stor = new MemStorage())
   {
      Contour<Point> contour = new Contour<Point>(stor);
      contour.Push(new Point(0, 0));
      contour.Push(new Point(0, 2));
      contour.Push(new Point(2, 2));
      contour.Push(new Point(2, 0));
      Assert.IsTrue(contour.Convex);
      Assert.AreEqual(4.0, contour.Area);
      //InContour requires MCvContour.rect to be pre-computed
      CvInvoke.cvBoundingRect(contour, 1);
      Assert.GreaterOrEqual(contour.InContour(new Point(1, 1)), 0);
      Assert.Less(contour.InContour(new Point(3, 3)), 0);

      Contour<PointF> contourF = new Contour<PointF>(stor);
      contourF.Push(new PointF(0, 0));
      contourF.Push(new PointF(0, 2));
      contourF.Push(new PointF(2, 2));
      contourF.Push(new PointF(2, 0));
      Assert.IsTrue(contourF.Convex);
      Assert.AreEqual(4.0, contourF.Area);
      //InContour requires MCvContour.rect to be pre-computed
      CvInvoke.cvBoundingRect(contourF, 1);
      Assert.GreaterOrEqual(contourF.InContour(new PointF(1, 1)), 0);
      Assert.Less(contourF.InContour(new PointF(3, 3)), 0);

      Contour<MCvPoint2D64f> contourD = new Contour<MCvPoint2D64f>(stor);
      contourD.Push(new MCvPoint2D64f(0, 0));
      contourD.Push(new MCvPoint2D64f(0, 2));
      contourD.Push(new MCvPoint2D64f(2, 2));
      contourD.Push(new MCvPoint2D64f(2, 0));
      //the same checks are disabled for the double-precision contour:
      //Assert.IsTrue(contourD.Convex);
      //Assert.AreEqual(contourD.Area, 4.0);
      //CvInvoke.cvBoundingRect(contourD, 1);
      //Assert.GreaterOrEqual(contourD.InContour(new PointF(1, 1)), 0);
      //Assert.Less(contourD.InContour(new PointF(3, 3)), 0);

      //a raw Seq of points supports Push as well; CV_MAKETYPE(4, 2) is CV_32SC2 (2-channel 32-bit int)
      Seq<Point> seq = new Seq<Point>(CvInvoke.CV_MAKETYPE(4, 2), stor);
      seq.Push(new Point(0, 0));
      seq.Push(new Point(0, 1));
      seq.Push(new Point(1, 1));
      seq.Push(new Point(1, 0));
   }
}
Example 2: Draw
//......... part of the code is omitted here .........
            }
            watch.Stop();
         }
      }
   }
   else
   {
      //extract features from the object image
      modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
      Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

      watch = Stopwatch.StartNew();

      // extract features from the observed image
      observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
      Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
      BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
      matcher.Add(modelDescriptors);

      if (observedDescriptors == null)
      {
         watch.Stop();
         matchTime = watch.ElapsedMilliseconds;
         return null;
      }

      indices = new Matrix<int>(observedDescriptors.Rows, k);
      using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
      {
         matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
         mask = new Matrix<byte>(dist.Rows, 1);
         mask.SetValue(255);
         Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
      }

      int nonZeroCount = CvInvoke.cvCountNonZero(mask);
      if (nonZeroCount >= 4)
      {
         nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
         if (nonZeroCount >= 4)
            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
      }
      watch.Stop();
   }
   //Draw the matched keypoints
   Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
      indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

   #region draw the projected region on the image
   if (homography != null)
   {
      //draw a rectangle along the projected model
      Rectangle rect = modelImage.ROI;
      PointF[] pts = new PointF[] {
         new PointF(rect.Left, rect.Bottom),
         new PointF(rect.Right, rect.Bottom),
         new PointF(rect.Right, rect.Top),
         new PointF(rect.Left, rect.Top)};

      using (MemStorage m1 = new MemStorage())
      using (MemStorage m2 = new MemStorage())
      {
         Contour<PointF> objPoly = new Contour<PointF>(m1);
         Contour<PointF> scenePoly = new Contour<PointF>(m2);
         foreach (PointF i in pts)
         {
            objPoly.Push(i);
         }
         homography.ProjectPoints(pts);
         foreach (PointF i in pts)
         {
            scenePoly.Push(i);
         }

         //reject matches whose projected region is implausibly small or large relative to the model
         double ratio = scenePoly.Area / objPoly.Area;
         Matrix<double> row = homography.GetRow(2); //unused; leftover from the disabled perspective check below
         if (!(ratio >= .005 && ratio <= 1.25))
         {
            result = null;
         }
         //if (Math.Abs(homography.Data[2, 0]) > .003 || Math.Abs(homography.Data[2, 1]) > .003)
         //{
         //   result = null;
         //}
         else
         {
            result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
         }
      }
   }
   else
   {
      result = null;
   }
   #endregion

   matchTime = watch.ElapsedMilliseconds;
   return result;
}
Example 3: Detect
public static Boolean Detect(ObjectDetectee observedScene, ObjectDetectee obj)
{
   HomographyMatrix homography = null;
   VectorOfKeyPoint observedKeyPoints;
   Matrix<int> indices;
   Matrix<byte> mask;
   int k = 2;
   double uniquenessThreshold = 0.8;
   int testsPassed = 0;

   // extract features from the observed image
   observedKeyPoints = observedScene.objectKeyPoints;
   Matrix<float> observedDescriptors = observedScene.objectDescriptors;
   BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
   matcher.Add(obj.objectDescriptors);

   if (observedDescriptors == null)
   {
      return false;
   }

   indices = new Matrix<int>(observedDescriptors.Rows, k);
   using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
   {
      matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
      mask = new Matrix<byte>(dist.Rows, 1);
      mask.SetValue(255);
      Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
   }

   int nonZero = 0;
   int nonZeroCount = CvInvoke.cvCountNonZero(mask);
   if (nonZeroCount >= 4)
   {
      nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(obj.objectKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
      if (nonZeroCount >= 4)
      {
         homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(obj.objectKeyPoints, observedKeyPoints, indices, mask, 2);

         //count the matches that survived all voting stages
         for (int i = 0; i < mask.Height; i++)
         {
            for (int j = 0; j < mask.Width; j++)
            {
               if (mask[i, j] != 0)
               {
                  nonZero++;
               }
            }
         }
         if (nonZero > 4)
         {
            testsPassed++;
         }
      }
   }

   if (homography != null)
   {
      //draw a rectangle along the projected model
      Rectangle rect = obj.objectImage.ROI;
      PointF[] pts = new PointF[] {
         new PointF(rect.Left, rect.Bottom),
         new PointF(rect.Right, rect.Bottom),
         new PointF(rect.Right, rect.Top),
         new PointF(rect.Left, rect.Top)};

      using (MemStorage m1 = new MemStorage())
      using (MemStorage m2 = new MemStorage())
      {
         Contour<PointF> objPoly = new Contour<PointF>(m1);
         Contour<PointF> scenePoly = new Contour<PointF>(m2);
         //note: this call has no effect; OrderBy returns a new sequence that is discarded here
         pts.OrderBy(p => p.X).ThenBy(p => p.Y);
         foreach (PointF i in pts)
         {
            objPoly.Push(i);
         }
         homography.ProjectPoints(pts);
         //note: likewise a no-op, for the same reason
         pts.OrderBy(p => p.X).ThenBy(p => p.Y);
         foreach (PointF i in pts)
         {
            scenePoly.Push(i);
         }

         double shapeMatch = CvInvoke.cvMatchShapes(objPoly, scenePoly, Emgu.CV.CvEnum.CONTOURS_MATCH_TYPE.CV_CONTOURS_MATCH_I3, 0);
         double ratio = scenePoly.Area / objPoly.Area;

         foreach (PointF i in pts)
         {
            if (i.X < 0 || i.Y < 0)
            {
               return false;
            }
         }
         if (shapeMatch != 0 && shapeMatch <= 2)
         {
            testsPassed++;
         }
         if (ratio > 0.001 && ratio < 5.25)
         {
            testsPassed++;
         }
         //......... part of the code is omitted here .........
Example 4: InterrelationFunc
/// <summary>
/// Cross-correlation function, which returns an array of Complex numbers holding the
/// normalized scalar products of the two contours at every cyclic shift. The warning
/// in the NormalScalarProd function still needs to be corrected.
/// </summary>
/// <returns>The correlation values, or null if the contours differ in length.</returns>
/// <param name="c1"> First contour. </param>
/// <param name="c2"> Second contour. </param>
public static Complex[] InterrelationFunc(Contour c1, Contour c2)
{
   if (c1.Length != c2.Length)
      return null;

   int len = c2.Length;
   Complex[] res = new Complex[len];
   for (int i = 0; i < len; i++)
   {
      res[i] = NormalScalarProd(c1, c2);
      c2.Push(); //rotate c2 by one position so the next product is taken at the next cyclic shift
   }
   return res;
}
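For orientation, a hypothetical usage sketch follows. The Contour type here is the custom class from this example, not Emgu CV's Contour&lt;T&gt;; only its Length, Push() and NormalScalarProd members are visible above, so the constructor taking a Complex[] of elementary vectors is an assumption made purely for illustration (System.Numerics and System.Linq are assumed for Complex and Max).

//hypothetical sketch: the Contour(Complex[]) constructor is assumed, not shown above
Complex[] square = { new Complex(1, 0), new Complex(0, 1),
                     new Complex(-1, 0), new Complex(0, -1) };
Contour a = new Contour(square);
Contour b = new Contour(square);

Complex[] correlation = InterrelationFunc(a, b);
if (correlation != null)
{
   //the shift with the largest magnitude marks the best cyclic alignment of the two contours
   double peak = correlation.Max(c => c.Magnitude);
}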