This article collects typical usage examples of the C# method Image.FindContours: what the method does, how to call it, and how it is used in practice. The curated examples below should help answer those questions. You can also explore further usage examples of the containing class, Image.
The following 15 code examples of Image.FindContours are shown, ordered by popularity by default.
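All of the examples target the Emgu CV 2.x API, in which FindContours returns the head Contour<Point> of a linked list that is traversed through its HNext property. A minimal sketch of that shared pattern (binaryImage is a placeholder for any single-channel input image):

using (MemStorage storage = new MemStorage())
{
    // FindContours returns the first contour; HNext walks the rest.
    for (Contour<Point> contour = binaryImage.FindContours(
             Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
             Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
             storage);
         contour != null;
         contour = contour.HNext)
    {
        Console.WriteLine(contour.Area); // e.g. inspect each contour
    }
}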
Example 1: FindRectangles
private void FindRectangles(Image<Gray, Byte> blackAndWhiteImage)
{
    m_FoundRectangles.Clear();
    using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
    {
        for (Contour<Point> contours = blackAndWhiteImage.FindContours(
                 Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                 Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
                 storage);
             contours != null;
             contours = contours.HNext)
        {
            Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
            //Debug.WriteLine(currentContour.Area);
            if (currentContour.Area > 250) //only consider contours with area greater than 250
            {
                if (currentContour.Total == 4) //the approximated contour has 4 vertices
                {
                    if (IsRectangle(currentContour))
                    {
                        m_FoundRectangles.Add(currentContour.GetMinAreaRect());
                    }
                }
            }
        }
    }
}
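The IsRectangle helper is not shown in this example. A plausible implementation, modeled on the angle test in Emgu CV's shape-detection sample (the 80-100 degree tolerance is an assumption, not taken from the original project):

private static bool IsRectangle(Contour<Point> contour)
{
    // Build the closed polygon's edges and require every corner angle
    // to be close to 90 degrees (here: between 80 and 100 degrees).
    Point[] vertices = contour.ToArray();
    LineSegment2D[] edges = PointCollection.PolyLine(vertices, true);
    for (int i = 0; i < edges.Length; i++)
    {
        double angle = Math.Abs(
            edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));
        if (angle < 80 || angle > 100)
            return false;
    }
    return true;
}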
Example 2: button1_Click
private void button1_Click(object sender, EventArgs e)
{
    OpenFileDialog Openfile = new OpenFileDialog();
    if (Openfile.ShowDialog() == DialogResult.OK)
    {
        Image<Bgr, byte> My_Image = new Image<Bgr, byte>(Openfile.FileName);
        Image<Gray, byte> gray_image = My_Image.Convert<Gray, byte>();
        Image<Gray, byte> eh_gray_image = My_Image.Convert<Gray, byte>();
        Image<Gray, byte> smooth_gray_image = My_Image.Convert<Gray, byte>();
        Image<Gray, byte> ed_gray_image = new Image<Gray, byte>(gray_image.Size);
        Image<Bgr, byte> final_image = new Image<Bgr, byte>(Openfile.FileName);
        List<MCvBox2D> detectedLicensePlateRegionList = new List<MCvBox2D>();
        using (MemStorage stor = new MemStorage()) // dispose the contour storage when done
        {
            CvInvoke.cvEqualizeHist(gray_image, eh_gray_image);
            CvInvoke.cvSmooth(eh_gray_image, smooth_gray_image, Emgu.CV.CvEnum.SMOOTH_TYPE.CV_GAUSSIAN, 3, 3, 0, 0);
            //CvInvoke.cvAdaptiveThreshold(smooth_gray_image, bi_gray_image, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 71, 1);
            CvInvoke.cvCanny(smooth_gray_image, ed_gray_image, 100, 50, 3);
            Contour<Point> contours = ed_gray_image.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, stor);
            DetectPlate(contours, detectedLicensePlateRegionList);
        }
        // MCvBox2D is a value type, so the detected regions remain valid here
        for (int i = 0; i < detectedLicensePlateRegionList.Count; i++)
        {
            final_image.Draw(detectedLicensePlateRegionList[i], new Bgr(Color.Red), 2);
        }
        imageBox1.Image = My_Image;
        imageBox2.Image = gray_image;
        imageBox3.Image = eh_gray_image;
        imageBox4.Image = smooth_gray_image;
        imageBox5.Image = ed_gray_image;
        imageBox6.Image = final_image;
    }
}
Example 3: DetectLicensePlate
/// <summary>
/// Detect license plates in the given image
/// </summary>
/// <param name="img">The image to search for license plates</param>
/// <param name="licensePlateImagesList">A list of images where the detected license plate regions are stored</param>
/// <param name="filteredLicensePlateImagesList">A list of images where the detected license plate regions (with noise removed) are stored</param>
/// <param name="detectedLicensePlateRegionList">A list where the regions of the license plates (each defined by an MCvBox2D) are stored</param>
/// <returns>The list of words for each license plate</returns>
public List<List<Word>> DetectLicensePlate(
    Image<Bgr, byte> img,
    List<Image<Gray, Byte>> licensePlateImagesList,
    List<Image<Gray, Byte>> filteredLicensePlateImagesList,
    List<MCvBox2D> detectedLicensePlateRegionList)
{
    List<List<Word>> licenses = new List<List<Word>>();
    // Convert the image to gray
    using (Image<Gray, byte> gray = img.Convert<Gray, Byte>())
    // Create the Canny edge image
    using (Image<Gray, Byte> canny = new Image<Gray, byte>(gray.Size))
    // Create the MemStorage that owns the contour memory
    using (MemStorage stor = new MemStorage())
    {
        // Run the Canny edge detector on the gray image
        CvInvoke.cvCanny(gray, canny, 130, 70, 3);
        // Find all contours in the edge image
        Contour<Point> contours = canny.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
            stor);
        // Examine each contour for license plate candidates
        FindLicensePlate(contours, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
    }
    return licenses;
}
Example 4: findContourRects
private static List<Rectangle> findContourRects(Image<Gray, byte> gImage, int minArea)
{
    Contour<Point> contours = gImage.FindContours();
    return findContours(contours, minArea);
}
Example 5: GetContours
public static IEnumerable<Contour<Point>> GetContours(Image<Gray, byte> canny)
{
    using (var storage = new MemStorage())
    {
        var contours = canny.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_TREE, storage);
        while (contours != null)
        {
            yield return contours;
            contours = contours.HNext;
        }
    }
}
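Because the iterator yields from inside the using block, the MemStorage is disposed only once enumeration finishes, and the contour memory goes with it. Callers should copy out any value-type data they need before then; a hypothetical caller:

List<Rectangle> boxes = new List<Rectangle>();
foreach (Contour<Point> contour in GetContours(cannyImage))
{
    // Rectangle is a value type, so it remains valid after the
    // MemStorage backing the contours is disposed.
    boxes.Add(contour.BoundingRectangle);
}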
Example 6: DetectStopSign
public List<SignResult> DetectStopSign(Image<Bgr, byte> image, out Image<Gray, byte> filteredImage)
{
    filteredImage = GetFilteredImage(image);
    List<SignResult> results = new List<SignResult>();
    using (MemStorage storage = new MemStorage())
    {
        Contour<Point> contours = filteredImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, storage);
        results = FindSign(image, contours);
    }
    return results;
}
Example 7: Form8_Load
private void Form8_Load(object sender, EventArgs e)
{
    bmpShow = ImageProcessor.ChangeSize(ShowBin.bmp, ShowBin.bmp.Width, ShowBin.bmp.Height + 4, 0, 2);
    Image<Bgr, Byte> bmp = new Image<Bgr, byte>(bmpShow);
    Image<Gray, byte> bmpContour = new Image<Gray, byte>(bmpShow);
    Contour<Point> foundContours = bmpContour.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
    completeContour = FilterContours(foundContours);
    imageBox1.Image = bmp;
    bmpClear = new Bitmap(bmpShow.Width, bmpShow.Height);
    using (Graphics g = Graphics.FromImage(bmpClear))
        g.FillRectangle(Brushes.White, 0, 0, bmpShow.Width, bmpShow.Height);
    toolStripStatusLabel1.Text = String.Empty;
    toolStripStatusLabel2.Text = String.Empty;
}
Example 8: ContourRecognize
public static string ContourRecognize(List<Bitmap> segments)
{
    string result = String.Empty;
    for (int i = 0; i < segments.Count; i++)
    {
        Bitmap bmp = segments[i];
        Image<Gray, byte> segment = new Image<Gray, byte>(bmp);
        Contour<Point> foundContours = segment.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
        List<Contour<Point>> completeContour = FilterContours(foundContours);
        if ((completeContour.Count == 1) && (completeContour[0].Perimeter < 10))
            continue;
        double contourMoment = 0;
        foreach (Contour<Point> contour in completeContour)
        {
            MCvMoments moments = contour.GetMoments();
            contourMoment += moments.m00 + moments.m01;
        }
        // Pick the reference character whose stored moment is closest
        int index = 0;
        double delta = contourMoment;
        for (int k = 0; k < Form1.moments.Count; k++)
        {
            double moment = double.Parse(Form1.moments[k][1]);
            if (Math.Abs(contourMoment - moment) < delta)
            {
                delta = Math.Abs(contourMoment - moment);
                index = k;
            }
        }
        result += Form1.moments[index][0];
    }
    return result;
}
Example 9: Detect
public Blob Detect(Image<Bgr, byte> frame)
{
    Image<Bgr, byte> bgr = frame;
    // Note: each Split() call allocates all three channel images; only one is kept here.
    Image<Gray, byte> b = bgr.Split()[0].InRange(new Gray(Minimum.Blue), new Gray(Maximum.Blue));
    Image<Gray, byte> g = bgr.Split()[1].InRange(new Gray(Minimum.Green), new Gray(Maximum.Green));
    Image<Gray, byte> r = bgr.Split()[2].InRange(new Gray(Minimum.Red), new Gray(Maximum.Red));
    Image<Gray, byte> finalResult = new Image<Gray, byte>(frame.Width, frame.Height);
    Gray black = new Gray(0);
    Gray white = new Gray(255);
    // The Image indexer is [row, column], so x iterates rows and y iterates columns here.
    for (int x = 0; x < frame.Height; x++)
    {
        for (int y = 0; y < frame.Width; y++)
        {
            if (b[x, y].Intensity == 255 && g[x, y].Intensity == 255 && r[x, y].Intensity == 255)
            {
                finalResult[x, y] = white;
            }
            else
            {
                finalResult[x, y] = black;
            }
        }
    }
    Contour<Point> largestContour = null;
    // TODO: Find reference for this
    for (Contour<Point> c = finalResult.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
             RETR_TYPE.CV_RETR_EXTERNAL); c != null; c = c.HNext)
    {
        if (largestContour == null || c.Area > largestContour.Area)
        {
            largestContour = c;
        }
    }
    return largestContour == null ? null : new Blob(largestContour);
}
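The per-pixel loop above is straightforward but slow. The same binary mask can be produced with Emgu CV's bitwise And on the three channel masks; a sketch under the same Minimum/Maximum fields (an untested rework, not the original author's code):

Image<Gray, byte>[] channels = bgr.Split(); // split once instead of three times
using (Image<Gray, byte> b = channels[0].InRange(new Gray(Minimum.Blue), new Gray(Maximum.Blue)))
using (Image<Gray, byte> g = channels[1].InRange(new Gray(Minimum.Green), new Gray(Maximum.Green)))
using (Image<Gray, byte> r = channels[2].InRange(new Gray(Minimum.Red), new Gray(Maximum.Red)))
{
    // White only where all three channel masks are white;
    // continue with FindContours on finalResult as above.
    Image<Gray, byte> finalResult = b.And(g).And(r);
}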
Example 10: DetectLicensePlate
/// <summary>
/// Detect license plates in the given image
/// </summary>
/// <param name="img">The image to search for license plates</param>
/// <param name="licensePlateImagesList">A list of images where the detected license plate regions are stored</param>
/// <param name="filteredLicensePlateImagesList">A list of images where the detected license plate regions (with noise removed) are stored</param>
/// <param name="detectedLicensePlateRegionList">A list where the regions of the license plates (each defined by an MCvBox2D) are stored</param>
/// <returns>The list of words for each license plate</returns>
public List<List<Word>> DetectLicensePlate(
    Image<Bgr, byte> img,
    List<Image<Gray, Byte>> licensePlateImagesList,
    List<Image<Gray, Byte>> filteredLicensePlateImagesList,
    List<MCvBox2D> detectedLicensePlateRegionList)
{
    List<List<Word>> licenses = new List<List<Word>>();
    using (Image<Gray, byte> gray = img.Convert<Gray, Byte>())
    using (Image<Gray, Byte> canny = new Image<Gray, byte>(gray.Size))
    using (MemStorage stor = new MemStorage())
    {
        CvInvoke.cvCanny(gray, canny, 100, 50, 3);
        Contour<Point> contours = canny.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
            stor);
        FindLicensePlate(contours, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
    }
    return licenses;
}
Example 11: LabelConnectedComponents
public Image<Gray, byte> LabelConnectedComponents(Image<Gray, byte> binary, int startLabel)
{
    Contour<Point> contours = binary.FindContours(
        CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE,
        RETR_TYPE.CV_RETR_CCOMP);
    int count = startLabel;
    for (Contour<Point> cont = contours;
         cont != null;
         cont = cont.HNext)
    {
        // Fill the contour (thickness = -1) with the current label value
        CvInvoke.cvDrawContours(
            binary,
            cont,
            new MCvScalar(count),
            new MCvScalar(0),
            2,
            -1,
            LINE_TYPE.FOUR_CONNECTED,
            new Point(0, 0));
        ++count;
    }
    return binary;
}
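A hypothetical call site is shown below; note that the labels are written into a single-channel byte image, so values above 255 cannot be represented and at most 255 distinct labels are usable:

Image<Gray, byte> labeled = LabelConnectedComponents(binaryMask, 1);
// Each connected component is now filled with its own gray level, starting at 1.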
Example 12: TestContour
public void TestContour()
{
    //Application.EnableVisualStyles();
    //Application.SetCompatibleTextRenderingDefault(false);
    using (Image<Gray, Byte> img = new Image<Gray, Byte>(100, 100, new Gray()))
    {
        Rectangle rect = new Rectangle(10, 10, 80 - 10, 50 - 10);
        img.Draw(rect, new Gray(255.0), -1);
        //ImageViewer.Show(img);
        PointF pIn = new PointF(60, 40);
        PointF pOut = new PointF(80, 100);
        using (MemStorage stor = new MemStorage())
        {
            Contour<Point> cs = img.FindContours(CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, CvEnum.RETR_TYPE.CV_RETR_LIST, stor);
            Assert.AreEqual(cs.MCvContour.elem_size, Marshal.SizeOf(typeof(Point)));
            Assert.AreEqual(rect.Width * rect.Height, cs.Area);
            Assert.IsTrue(cs.Convex);
            Assert.AreEqual(rect.Width * 2 + rect.Height * 2, cs.Perimeter);
            Rectangle rect2 = cs.BoundingRectangle;
            rect2.Width -= 1;
            rect2.Height -= 1;
            //rect2.Center.X -= 0.5;
            //rect2.Center.Y -= 0.5;
            Assert.IsTrue(rect2.Equals(rect));
            Assert.AreEqual(cs.InContour(pIn), 100);
            Assert.AreEqual(cs.InContour(pOut), -100);
            Assert.AreEqual(cs.Distance(pIn), 10);
            Assert.AreEqual(cs.Distance(pOut), -50);
            img.Draw(cs, new Gray(100), new Gray(100), 0, 1);
            MCvPoint2D64f rectangleCenter = new MCvPoint2D64f(rect.X + rect.Width / 2.0, rect.Y + rect.Height / 2.0);
            MCvMoments moment = cs.GetMoments();
            MCvPoint2D64f center = moment.GravityCenter;
            Assert.AreEqual(center, rectangleCenter);
        }
        using (MemStorage stor = new MemStorage())
        {
            Image<Gray, Byte> img2 = new Image<Gray, byte>(300, 200);
            Contour<Point> c = img2.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, stor);
            Assert.AreEqual(c, null);
        }
    }
    int s1 = Marshal.SizeOf(typeof(MCvSeq));
    int s2 = Marshal.SizeOf(typeof(MCvContour));
    int sizeRect = Marshal.SizeOf(typeof(Rectangle));
    Assert.AreEqual(s1 + sizeRect + 4 * Marshal.SizeOf(typeof(int)), s2);
}
Example 13: ExtractContourAndHull
private void ExtractContourAndHull(Image<Gray, byte> skin)
{
    List<Contour<Point>> contourList = new List<Contour<Point>>();
    Contour<Point> contours = skin.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, contourStorage);
    Contour<Point> biggestContour = null;
    Double current = 0;
    Double largest = 0;
    // Walk the contour list and keep the one with the largest area
    while (contours != null)
    {
        current = contours.Area;
        if (current > largest)
        {
            largest = current;
            biggestContour = contours;
        }
        contours = contours.HNext;
    }
    if (biggestContour != null)
    {
        //currentFrame.Draw(biggestContour, new Bgr(Color.DarkViolet), 2);
        Contour<Point> currentContour = biggestContour.ApproxPoly(biggestContour.Perimeter * 0.0025, contourStorage);
        currentFrame.Draw(currentContour, new Bgr(System.Drawing.Color.LimeGreen), 2);
        biggestContour = currentContour;
        hull = biggestContour.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
        box = biggestContour.GetMinAreaRect();
        PointF[] points = box.GetVertices();
        //handRect = box.MinAreaRect();
        //currentFrame.Draw(handRect, new Bgr(200, 0, 0), 1);
        Point[] ps = new Point[points.Length];
        for (int i = 0; i < points.Length; i++)
            ps[i] = new Point((int)points[i].X, (int)points[i].Y);
        currentFrame.DrawPolyline(hull.ToArray(), true, new Bgr(200, 125, 75), 2);
        currentFrame.Draw(new CircleF(new PointF(box.center.X, box.center.Y), 3), new Bgr(200, 125, 75), 2);
        //ellip.MCvBox2D = CvInvoke.cvFitEllipse2(biggestContour.Ptr);
        //currentFrame.Draw(new Ellipse(ellip.MCvBox2D), new Bgr(Color.LavenderBlush), 3);
        PointF center;
        float radius;
        //CvInvoke.cvMinEnclosingCircle(biggestContour.Ptr, out center, out radius);
        //currentFrame.Draw(new CircleF(center, radius), new Bgr(System.Drawing.Color.Gold), 2);
        //currentFrame.Draw(new CircleF(new PointF(ellip.MCvBox2D.center.X, ellip.MCvBox2D.center.Y), 3), new Bgr(100, 25, 55), 2);
        //currentFrame.Draw(ellip, new Bgr(Color.DeepPink), 2);
        //CvInvoke.cvEllipse(currentFrame, new Point((int)ellip.MCvBox2D.center.X, (int)ellip.MCvBox2D.center.Y), new System.Drawing.Size((int)ellip.MCvBox2D.size.Width, (int)ellip.MCvBox2D.size.Height), ellip.MCvBox2D.angle, 0, 360, new MCvScalar(120, 233, 88), 1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, 0);
        //currentFrame.Draw(new Ellipse(new PointF(box.center.X, box.center.Y), new SizeF(box.size.Height, box.size.Width), box.angle), new Bgr(0, 0, 0), 2);
        // Drop hull points that sit closer to their neighbor than a tenth of the box width
        filteredHull = new Seq<Point>(contourStorage);
        for (int i = 0; i < hull.Total; i++)
        {
            // hull[i + 1] relies on Seq index wrapping: index Total maps back to element 0
            if (Math.Sqrt(Math.Pow(hull[i].X - hull[i + 1].X, 2) + Math.Pow(hull[i].Y - hull[i + 1].Y, 2)) > box.size.Width / 10)
            {
                filteredHull.Push(hull[i]);
            }
        }
        // GetConvexityDefacts (sic) is the method's actual spelling in the Emgu CV 2.x API
        defects = biggestContour.GetConvexityDefacts(contourStorage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
        defectArray = defects.ToArray();
    }
}
Example 14: DetectObjects
private List<Contour<Point>> DetectObjects(MemStorage storage, Image<Gray, Byte> processedFrame, double ContourAccuracy, int minAreaSize, int maxAreaSize)
{
    Contour<Point> biggestContour = null;
    // clear filtered list
    List<Contour<Point>> filteredUnidentifiedObjects = new List<Contour<Point>>();
    // get all contours, and process each
    //for (Contour<Point> contours = processedFrame.Convert<Gray, byte>().FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage); contours != null; contours = contours.HNext) {
    for (Contour<Point> contours = processedFrame.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage); contours != null; contours = contours.HNext)
    {
        Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * ContourAccuracy, storage);
        // find the biggest blob
        if (biggestContour == null)
        {
            biggestContour = currentContour;
        }
        else if (biggestContour.Area < currentContour.Area)
        {
            biggestContour = currentContour;
        }
        // Alright this bit has been redone to be extra careful with the values it processes, so if things go wrong
        // it's easier to debug
        // if the detected size is within limits
        if (currentContour.Area > minAreaSize && currentContour.Area < maxAreaSize)
        {
            filteredUnidentifiedObjects.Add(currentContour);
        }
    }
    if (calibrateSwitch)
    {
        if (biggestContour != null)
        {
            calibrateSwitch = false;
            projectionArea = biggestContour.BoundingRectangle;
            Console.WriteLine("Calibrated to: " + projectionArea.Width + " " + projectionArea.Height + "! ");
        }
        else
        {
            Console.WriteLine("No contour detected atm! Try again :D");
        }
    }
    return filteredUnidentifiedObjects;
}
Example 15: ProcessImage
public void ProcessImage(Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> image)
{
    Emgu.CV.Image<Gray, byte> gray = image.Convert<Gray, byte>();
    Emgu.CV.Image<Gray, byte> binary = new Image<Gray, byte>(image.Size);
    CvInvoke.cvThreshold(gray, binary, 40, 255, THRESH.CV_THRESH_BINARY | THRESH.CV_THRESH_OTSU);
    binary._Not();
    Emgu.CV.Contour<System.Drawing.Point> contour_points = binary.FindContours();
    using (MemStorage storage = new MemStorage()) // storage for the polygon approximations
    {
        Matrix<double> warp = new Matrix<double>(3, 3);
        while (contour_points != null)
        {
            Contour<Point> c = contour_points.ApproxPoly(contour_points.Perimeter * 0.05, storage);
            double p = c.Perimeter;
            if (c.Total == 4 && p > 300)
            {
                PointF[] src = new PointF[] {
                    new PointF(c[0].X, c[0].Y),
                    new PointF(c[1].X, c[1].Y),
                    new PointF(c[2].X, c[2].Y),
                    new PointF(c[3].X, c[3].Y)};
                CvInvoke.cvGetPerspectiveTransform(src, _dest, warp);
                int flags = (int)INTER.CV_INTER_LINEAR + (int)WARP.CV_WARP_FILL_OUTLIERS;
                CvInvoke.cvWarpPerspective(gray, _roi, warp, flags, new MCvScalar(0));
                double min_error;
                Orientation orient;
                FindBestOrientation(out min_error, out orient);
                if (min_error < 0.4)
                {
                    image.DrawPolyline(c.ToArray(), true, new Bgr(Color.Green), 2);
                    System.Console.WriteLine(min_error + " " + orient);
                    // Draw the edge that marks the detected orientation
                    switch (orient)
                    {
                        case Orientation.Degrees0:
                            image.Draw(new LineSegment2D(c[0], c[3]), new Bgr(System.Drawing.Color.Red), 2);
                            break;
                        case Orientation.Degrees90:
                            image.Draw(new LineSegment2D(c[1], c[0]), new Bgr(System.Drawing.Color.Red), 2);
                            break;
                        case Orientation.Degrees180:
                            image.Draw(new LineSegment2D(c[2], c[1]), new Bgr(System.Drawing.Color.Red), 2);
                            break;
                        case Orientation.Degrees270:
                            image.Draw(new LineSegment2D(c[3], c[2]), new Bgr(System.Drawing.Color.Red), 2);
                            break;
                    }
                }
            }
            contour_points = contour_points.HNext;
        }
    }
}