This article compiles typical usage examples of the C# Image.Draw method. If you have been wondering what Image.Draw does in C#, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also explore further usage examples of the containing class, Image.
The following shows 15 code examples of the Image.Draw method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code samples.
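Before the full examples, a minimal self-contained sketch of the Image.Draw overloads that recur below (rectangle, circle, line segment, and text) may be useful for orientation. It assumes Emgu CV 2.x, where text is drawn through an MCvFont passed by ref (the 3.x overload takes a CvEnum.FontFace instead, as in Example 15); the DrawDemo method name and the drawn coordinates are made up for illustration.

using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.UI;

static void DrawDemo()
{
    Image<Bgr, byte> img = new Image<Bgr, byte>(320, 240);
    img.SetValue(new Bgr(Color.White));                                                  // white canvas

    img.Draw(new Rectangle(40, 40, 120, 80), new Bgr(Color.Red), 2);                     // rectangle outline, thickness 2
    img.Draw(new Rectangle(200, 150, 60, 40), new Bgr(Color.Blue), -1);                  // negative thickness fills the shape
    img.Draw(new CircleF(new PointF(160, 120), 30f), new Bgr(Color.Green), 1);           // circle outline
    img.Draw(new LineSegment2D(new Point(0, 0), new Point(319, 239)), new Bgr(Color.Gray), 1);

    MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 1.0, 1.0);
    img.Draw("Image.Draw demo", ref font, new Point(10, 20), new Bgr(Color.Black));      // text (Emgu CV 2.x overload)

    ImageViewer.Show(img, "Image.Draw overloads");
}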
Example 1: Detection
private void Detection(object r, EventArgs e)
{
    // Grab a frame from the capture device and resize it to 320x240
    currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    DetectFace.Detect(currentFrame, "haarcascade_frontalface_default.xml", faces, out detectionTime);
    foreach (Rectangle face in faces)
    {
        currentFrame.Draw(face, new Bgr(Color.Red), 2);
        // Make a small grayscale copy of the face region for recognition
        result = currentFrame.Copy(face).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
        result._EqualizeHist();
        if (Eigen_Recog.IsTrained)
        {
            string name = Eigen_Recog.Recognise(result);
            // Draw the label for each face detected and recognized
            currentFrame.Draw(name, ref font, new Point(face.X - 2, face.Y - 2), new Bgr(Color.LightGreen));
        }
    }
    // Display the image
    ImageViewer.Image = currentFrame;
    labelTimeSpend.Text = detectionTime.ToString() + " msec";
    faces.Clear();
    currentFrame.Dispose();
}
Example 2: FindTwoTest
public void FindTwoTest()
{
    Image<Gray, Byte> img = new Image<Gray, Byte>(400, 400);
    img.Draw(new Rectangle(100, 100, 50, 50), new Gray(255), -1);
    img.Draw(new Rectangle(200, 200, 100, 100), new Gray(255), -1);
    Assert.AreEqual(new Rectangle(200, 200, 100, 100), recogniser.getBoundingBox(img));
}
Example 3: run
private void run()
{
    Image<Bgr, Byte> image = new Image<Bgr, byte>("lena.jpg"); //Read the file as an 8-bit Bgr image
    Capture vid = new Capture("kw.avi");
    vid.FlipVertical = true;
    int x = 0;
    TimeSpan time = TimeSpan.Zero;
    MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0);
    using (VideoWriter vw = new VideoWriter("out3.avi", 15, 640, 480, true))
    {
        while (vid.Grab())
        {
            //if (++x % 1 != 0) continue;
            image = vid.RetrieveBgrFrame();
            long detectionTime;
            List<Rectangle> faces = new List<Rectangle>();
            List<Rectangle> eyes = new List<Rectangle>();
            DetectFace.Detect(image, "haarcascade_frontalface_default.xml", "supersmile.xml", faces, eyes, out detectionTime);
            foreach (Rectangle face in faces)
                image.Draw(face, new Bgr(Color.Red), 2);
            foreach (Rectangle eye in eyes)
                image.Draw(eye, new Bgr(Color.Blue), 2);
            if (eyes.Count > 0) time = time.Add(new TimeSpan(0, 0, 0, 0, 66));
            //draw the elapsed time on the frame, display it, and write it to the output video
            image.Draw(String.Format("{0}:{1}", time.Seconds, time.Milliseconds), ref font, new Point(50, 50), new Bgr(0, 0, 255));
            setimage(image);
            vw.WriteFrame<Bgr, Byte>(image);
        }
    }
}
Example 4: draw3LineFromList
public static void draw3LineFromList(Image<Bgr, Byte> img, List<Point> l)
{
    LineSegment2D line1 = new LineSegment2D(l[0], l[1]);
    LineSegment2D line2 = new LineSegment2D(l[2], l[3]);
    LineSegment2D line3 = new LineSegment2D(l[4], l[5]);
    img.Draw(line1, new Bgr(255, 0, 0), 3);
    img.Draw(line2, new Bgr(0, 255, 0), 3);
    img.Draw(line3, new Bgr(0, 0, 255), 3);
}
Example 5: draw4ContourAndCircle
public static void draw4ContourAndCircle(Image<Bgr, Byte> img, Contour<Point> contour)
{
    img.Draw(contour, new Bgr(255, 0, 0), 3);
    for (int i = 0; i < contour.Total; i++)
    {
        // Mark each vertex of the contour with a small circle
        PointF pkt = new PointF(contour[i].X, contour[i].Y);
        img.Draw(new CircleF(pkt, 4), new Bgr(i * 50, i * 50, 250), 4);
    }
}
Example 6: Run
static void Run()
{
    float maxValue = 600;
    #region create random points in the range of [0, maxValue]
    PointF[] pts = new PointF[20];
    Random r = new Random((int)(DateTime.Now.Ticks & 0x0000ffff));
    for (int i = 0; i < pts.Length; i++)
        pts[i] = new PointF((float)r.NextDouble() * maxValue, (float)r.NextDouble() * maxValue);
    #endregion

    Triangle2DF[] delaunayTriangles;
    VoronoiFacet[] voronoiFacets;
    using (PlanarSubdivision subdivision = new PlanarSubdivision(pts))
    {
        //Obtain the Delaunay triangulation from the set of points
        delaunayTriangles = subdivision.GetDelaunayTriangles();
        //Obtain the Voronoi facets from the set of points
        voronoiFacets = subdivision.GetVoronoiFacets();
    }

    //create an image for display purposes
    Image<Bgr, Byte> img = new Image<Bgr, byte>((int)maxValue, (int)maxValue);

    //Draw the Voronoi facets
    foreach (VoronoiFacet facet in voronoiFacets)
    {
        Point[] points = Array.ConvertAll<PointF, Point>(facet.Vertices, Point.Round);
        //Fill the facet with a random color
        img.FillConvexPoly(
            points,
            new Bgr(r.NextDouble() * 120, r.NextDouble() * 120, r.NextDouble() * 120)
        );
        //highlight the edge of the facet in black
        img.DrawPolyline(points, true, new Bgr(Color.Black), 2);
        //draw the point associated with each facet in red
        img.Draw(new CircleF(facet.Point, 5.0f), new Bgr(Color.Red), 0);
    }

    //Draw the Delaunay triangulation
    foreach (Triangle2DF triangle in delaunayTriangles)
    {
        img.Draw(triangle, new Bgr(Color.White), 1);
    }

    //display the image
    ImageViewer.Show(img, "Planar Subdivision");
}
Example 7: createFirstHypothesis
public static FeatureVector createFirstHypothesis(Image<Bgr, Byte> imagen)
{
    List<PointF> fingertips = new List<PointF>();
    fingertips.Add(new PointF(110, 220));
    fingertips.Add(new PointF(175, 60));
    fingertips.Add(new PointF(270, 4));
    fingertips.Add(new PointF(410, 26));
    fingertips.Add(new PointF(640, 200));
    PointF punto = new PointF(400, 400);
    List<float> angles = calculateFingerAngles(fingertips, punto);
    FeatureVector vector = new FeatureVector(fingertips, angles, punto, 5);

    //draw the center point of the hand
    PointF puntoC = new PointF(400, 400);
    Point punt = new Point(400, 400);
    CircleF centerCircle = new CircleF(puntoC, 5f);
    imagen.Draw(centerCircle, new Bgr(Color.Brown), 3);

    //draw each fingertip and a line from the fingertip to the hand center
    foreach (PointF p in fingertips)
    {
        CircleF circle = new CircleF(p, 5f);
        imagen.Draw(circle, new Bgr(Color.Red), 3);
        Point pun = Point.Round(p);
        LineSegment2D lineaDedoCentro = new LineSegment2D(pun, punt);
        imagen.Draw(lineaDedoCentro, new Bgr(Color.Green), 2);
    }

    //draw a square of side 180 centered on the hand point
    Point p1 = new Point((int)(puntoC.X - 90), (int)(puntoC.Y - 90));
    Point p2 = new Point((int)(puntoC.X - 90), (int)(puntoC.Y + 90));
    Point p3 = new Point((int)(puntoC.X + 90), (int)(puntoC.Y - 90));
    Point p4 = new Point((int)(puntoC.X + 90), (int)(puntoC.Y + 90));
    LineSegment2D line = new LineSegment2D(p1, p2);
    LineSegment2D line1 = new LineSegment2D(p1, p3);
    LineSegment2D line2 = new LineSegment2D(p3, p4);
    LineSegment2D line3 = new LineSegment2D(p2, p4);
    imagen.Draw(line, new Bgr(Color.Brown), 3);
    imagen.Draw(line1, new Bgr(Color.Brown), 3);
    imagen.Draw(line2, new Bgr(Color.Brown), 3);
    imagen.Draw(line3, new Bgr(Color.Brown), 3);
    return vector;
}
Example 8: Run
static void Run()
{
    Image<Bgr, Byte> image = new Image<Bgr, byte>("lena.jpg"); //Read the file as an 8-bit Bgr image
    Image<Gray, Byte> gray = image.Convert<Gray, Byte>();      //Convert it to grayscale

    Stopwatch watch = Stopwatch.StartNew();

    //normalize brightness and increase the contrast of the image
    gray._EqualizeHist();

    //Read the HaarCascade objects
    HaarCascade face = new HaarCascade("haarcascade_frontalface_alt_tree.xml");
    HaarCascade eye = new HaarCascade("haarcascade_eye.xml");

    //Detect the faces from the gray scale image and store the locations as rectangles
    //The first dimension is the channel
    //The second dimension is the index of the rectangle in the specific channel
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
        face,
        1.1,
        10,
        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        new Size(20, 20));

    foreach (MCvAvgComp f in facesDetected[0])
    {
        //draw the face detected in the 0th (gray) channel with blue color
        image.Draw(f.rect, new Bgr(Color.Blue), 2);

        //Set the region of interest to the face so eyes are searched only inside it
        gray.ROI = f.rect;
        MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
            eye,
            1.1,
            10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));
        gray.ROI = Rectangle.Empty;

        foreach (MCvAvgComp e in eyesDetected[0])
        {
            Rectangle eyeRect = e.rect;
            eyeRect.Offset(f.rect.X, f.rect.Y);
            image.Draw(eyeRect, new Bgr(Color.Red), 2);
        }
    }
    watch.Stop();

    //display the image
    ImageViewer.Show(image, String.Format("Perform face and eye detection in {0} milliseconds", watch.ElapsedMilliseconds));
}
Example 9: GenerateFilterMask
/// <summary>
/// Generate a circular high-pass or low-pass filter mask.
/// </summary>
/// <param name="size">Size of the mask to generate</param>
/// <param name="isHighPass">True for a high-pass mask, false for a low-pass mask</param>
/// <param name="width">Radius of the central circular region</param>
/// <returns>The filter mask</returns>
internal static Image<Gray, float> GenerateFilterMask(Size size, bool isHighPass, int width)
{
    Image<Gray, float> mask = new Image<Gray, float>(size);
    if (isHighPass)
    {
        //High-pass: ones everywhere, zero out the circle around the center
        mask.SetValue(1);
        mask.Draw(new CircleF(new PointF(size.Width / 2, size.Height / 2), width), new Gray(0), 0);
    }
    else
    {
        //Low-pass: zeros everywhere, set the circle around the center to one
        mask.SetZero();
        mask.Draw(new CircleF(new PointF(size.Width / 2, size.Height / 2), width), new Gray(1), 0);
    }
    return mask;
}
Example 10: OrderedPointList
/// <summary>
/// Draw a list of ordered points using circles and numbering.
/// </summary>
/// <param name="image">Image to draw to</param>
/// <param name="points">Points to draw</param>
/// <param name="color">Color to use</param>
public static void OrderedPointList(
    Image<Bgr, byte> image,
    IEnumerable<PointF> points,
    System.Drawing.Color color)
{
    Bgr bgr = new Bgr(color);
    MCvFont f = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 0.8, 0.8);
    int count = 1;
    foreach (PointF point in points)
    {
        image.Draw(new CircleF(point, 4), bgr, 2);
        Point p = new Point((int)Math.Round(point.X), (int)Math.Round(point.Y));
        image.Draw(count.ToString(), ref f, new System.Drawing.Point(p.X + 5, p.Y - 5), bgr);
        count++;
    }
}
Example 11: ProcessImage
private void ProcessImage(Image<Bgr, byte> image)
{
    Stopwatch watch = Stopwatch.StartNew(); // time the detection process
    List<Image<Gray, Byte>> stopSignList = new List<Image<Gray, byte>>();
    List<Rectangle> stopSignBoxList = new List<Rectangle>();
    _stopSignDetector.DetectStopSign(image, stopSignList, stopSignBoxList);
    watch.Stop(); //stop the timer

    processTimeLabel.Text = String.Format("Stop Sign Detection time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);
    panel1.Controls.Clear();
    Point startPoint = new Point(10, 10);
    for (int i = 0; i < stopSignList.Count; i++)
    {
        Rectangle rect = stopSignBoxList[i];
        //label each detected sign with the coordinates of its bounding-box center
        AddLabelAndImage(
            ref startPoint,
            String.Format("Stop Sign [{0},{1}]:", rect.Location.X + rect.Width / 2, rect.Location.Y + rect.Height / 2),
            stopSignList[i]);
        image.Draw(rect, new Bgr(Color.Aquamarine), 2);
    }
    imageBox1.Image = image;
}
Example 12: FilterPlate
private static Image<Gray, Byte> FilterPlate(Image<Gray, Byte> plate)
{
    Image<Gray, Byte> thresh = plate.ThresholdBinaryInv(new Gray(120), new Gray(255));
    Image<Gray, Byte> plateMask = new Image<Gray, byte>(plate.Size);
    Image<Gray, Byte> plateCanny = plate.Canny(new Gray(100), new Gray(50));
    using (MemStorage stor = new MemStorage())
    {
        plateMask.SetValue(255.0);
        for (
            Contour<Point> contours = plateCanny.FindContours(
                Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                stor);
            contours != null; contours = contours.HNext)
        {
            Rectangle rect = contours.BoundingRectangle;
            if (rect.Height > (plate.Height >> 1))
            {
                //the contour is tall enough to be a character: carve its (slightly expanded)
                //bounding box out of the mask so that region is preserved in thresh
                rect.X -= 1; rect.Y -= 1; rect.Width += 2; rect.Height += 2;
                rect.Intersect(plate.ROI);
                plateMask.Draw(rect, new Gray(0.0), -1);
            }
        }
        thresh.SetValue(0, plateMask);
    }
    thresh._Erode(1);
    thresh._Dilate(1);
    return thresh;
}
Example 13: ProcessImage
private void ProcessImage(Image<Bgr, byte> image)
{
    Stopwatch watch = Stopwatch.StartNew(); // time the detection process
    List<Image<Gray, Byte>> licensePlateImagesList = new List<Image<Gray, byte>>();
    List<Image<Gray, Byte>> filteredLicensePlateImagesList = new List<Image<Gray, byte>>();
    List<MCvBox2D> licenseBoxList = new List<MCvBox2D>();
    List<List<Word>> words = _licensePlateDetector.DetectLicensePlate(
        image,
        licensePlateImagesList,
        filteredLicensePlateImagesList,
        licenseBoxList);
    watch.Stop(); //stop the timer

    processTimeLabel.Text = String.Format("License Plate Recognition time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);
    panel1.Controls.Clear();
    Point startPoint = new Point(10, 10);
    for (int i = 0; i < words.Count; i++)
    {
        AddLabelAndImage(
            ref startPoint,
            String.Format("License: {0}", String.Join(" ", words[i].ConvertAll<String>(delegate(Word w) { return w.Text; }).ToArray())),
            licensePlateImagesList[i].ConcateVertical(filteredLicensePlateImagesList[i]));
        image.Draw(licenseBoxList[i], new Bgr(Color.Red), 2);
    }
    imageBox1.Image = image;
}
Example 14: ViewDidLoad
public override void ViewDidLoad()
{
    base.ViewDidLoad();
    ButtonText = "Detect Pedestrian";
    OnButtonClick += delegate
    {
        long processingTime;
        using (Image<Bgr, byte> image = new Image<Bgr, byte>("pedestrian.png"))
        {
            Rectangle[] pedestrians = FindPedestrian.Find(
                image.Mat, false,
                out processingTime
            );
            foreach (Rectangle rect in pedestrians)
            {
                image.Draw(rect, new Bgr(Color.Red), 1);
            }
            Size frameSize = FrameSize;
            using (Image<Bgr, Byte> resized = image.Resize(frameSize.Width, frameSize.Height, Emgu.CV.CvEnum.Inter.Nearest, true))
            {
                MessageText = String.Format(
                    "Detection Time: {0} milliseconds.",
                    processingTime
                );
                SetImage(resized);
            }
        }
    };
}
Example 15: ViewDidLoad
public override void ViewDidLoad()
{
    base.ViewDidLoad();
    if (AppDelegate.iOS7Plus)
        EdgesForExtendedLayout = UIRectEdge.None;
    /*
    MCvFont font = new MCvFont(
        Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN,
        1.0,
        1.0
    );*/
    using (Image<Bgr, Byte> image = new Image<Bgr, Byte>(320, 240))
    {
        image.SetValue(new Bgr(255, 255, 255));
        image.Draw(
            "Hello, world",
            new Point(30, 30),
            CvEnum.FontFace.HersheyPlain,
            1.0,
            new Bgr(0, 255, 0)
        );
        UIImageView imageView = new UIImageView(image.ToUIImage());
        Add(imageView);
    }
}