

C# Image.DrawPolyline Method Code Examples

This article collects typical usage examples of the C# Image.DrawPolyline method, as found in Emgu CV's Image class. If you are wondering what Image.DrawPolyline does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the Image class it belongs to.


The examples below show 15 uses of the Image.DrawPolyline method, sorted by popularity by default.
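Before the examples, here is a minimal self-contained sketch of the call itself. It assumes the Emgu CV 2.x API that the examples below rely on, where the signature is DrawPolyline(Point[] pts, bool isClosed, TColor color, int thickness); the image size, point values, and output filename are arbitrary illustration choices:

using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;

class PolylineDemo
{
    static void Main()
    {
        // Draw a closed red triangle with 2-pixel edges on a blank 400x400 BGR image.
        Image<Bgr, byte> img = new Image<Bgr, byte>(400, 400);
        Point[] triangle = new Point[]
        {
            new Point(200, 50),
            new Point(50, 350),
            new Point(350, 350)
        };
        img.DrawPolyline(triangle, true, new Bgr(Color.Red), 2);
        img.Save("triangle.png");   // or display it with Emgu.CV.UI.ImageViewer.Show(img)
    }
}

Passing false for isClosed would leave the polyline open, drawing only the segments between consecutive points.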

Example 1: ProcessFrame

        private void ProcessFrame(object sender, EventArgs arg)
        {
            frame = capture.QueryFrame();

            if (frame != null)
            {
                // add cross hairs to the image
                int totalwidth = frame.Width;
                int totalheight = frame.Height;
                PointF[] linepointshor = new PointF[] {
                    new PointF(0, totalheight/2),
                    new PointF(totalwidth, totalheight/2)
                };
                PointF[] linepointsver = new PointF[] {
                    new PointF(totalwidth/2, 0),
                    new PointF(totalwidth/2, totalheight)
                };

                frame.DrawPolyline(Array.ConvertAll<PointF, System.Drawing.Point>(linepointshor, System.Drawing.Point.Round), false, new Bgr(System.Drawing.Color.AntiqueWhite), 1);
                frame.DrawPolyline(Array.ConvertAll<PointF, System.Drawing.Point>(linepointsver, System.Drawing.Point.Round), false, new Bgr(System.Drawing.Color.AntiqueWhite), 1);
            }
            CapturedImageBox.Image = frame;
        }
Author: glocklueng, Project: ModernUI-Pick-and-Place-Controller-Software, Lines: 32, Source: CameraWindow.xaml.cs

Example 2: Process

        public DetectorResult Process(Image<Bgr, byte> rawFrame, Image<Gray, byte> grayFrame)
        {
            Image<Bgr, byte> contourImage = null;
            if (rawFrame != null)
            {
                List<Point[]> polygon = new List<Point[]>();      // to draw the perimeter

                Image<Gray, byte> gray = rawFrame.Convert<Gray, byte>();               // convert source to gray
                Image<Gray, byte> thresh = gray.PyrDown().PyrUp();                  // pyramid down then up smooths the image to suppress noise before edge detection

                using (Image<Gray, Byte> mask = new Image<Gray, byte>(thresh.Size))
                using (Image<Gray, byte> cannyImg = thresh.Canny(new Gray(10), new Gray(50)))
                using (Image<Gray, byte> dilateImg = cannyImg.Dilate(1))

                using (MemStorage stor = new MemStorage())
                {
                    mask.SetValue(255.0);
                    for (
                       Contour<Point> contours = dilateImg.FindContours(
                          Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                          Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                          stor);
                       contours != null; contours = contours.HNext)
                    {
                        Rectangle rect = contours.BoundingRectangle;
                        int area = rect.Height * rect.Width;
                        if (area > 30000)
                        {
                            rect.X -= 1; rect.Y -= 1; rect.Width += 2; rect.Height += 2;
                            rect.Intersect(gray.ROI);
                            mask.Draw(rect, new Gray(0.0), -1);

                            polygon.Add(contours.ToArray());
                        }
                    }

                    thresh.SetValue(0, mask);
                }

                contourImage = new Image<Bgr, byte>(gray.Size);   // blank canvas the same size as the source frame

                foreach (Point[] points in polygon)
                    contourImage.DrawPolyline(points, true, new Bgr(Color.Red), 2);
            }
            var result = new DetectorResult()
                             {
                                 Confidence = 100,
                                 GrayImage = grayFrame,
                                 ProcessedImage = contourImage,
                                 RawImage = rawFrame
                             };
            return result;
        }
Author: genecyber, Project: PredatorCV, Lines: 54, Source: Contour.cs

Example 3: Run

      static void Run()
      {
         float maxValue = 600;

         #region create random points in the range of [0, maxValue]
         PointF[] pts = new PointF[20];
         Random r = new Random((int)(DateTime.Now.Ticks & 0x0000ffff));
         for (int i = 0; i < pts.Length; i++)
            pts[i] = new PointF((float)r.NextDouble() * maxValue, (float)r.NextDouble() * maxValue);
         #endregion

         Triangle2DF[] delaunayTriangles;
         VoronoiFacet[] voronoiFacets;
         using (PlanarSubdivision subdivision = new PlanarSubdivision(pts))
         {
            //Obtain the Delaunay triangulation from the set of points
            delaunayTriangles = subdivision.GetDelaunayTriangles();

            //Obtain the Voronoi facets from the set of points
            voronoiFacets = subdivision.GetVoronoiFacets();
         }

         //create an image for display purpose
         Image<Bgr, Byte> img = new Image<Bgr, byte>((int)maxValue, (int) maxValue);

         //Draw the Voronoi facets
         foreach (VoronoiFacet facet in voronoiFacets)
         {
            Point[] points = Array.ConvertAll<PointF, Point>(facet.Vertices, Point.Round);

            //Draw the facet in color
            img.FillConvexPoly(
                points,
                new Bgr(r.NextDouble() * 120, r.NextDouble() * 120, r.NextDouble() * 120)
                );

            //highlight the edge of the facet in black
            img.DrawPolyline(points, true, new Bgr(Color.Black), 2);

            //draw the points associated with each facet in red
            img.Draw(new CircleF(facet.Point, 5.0f), new Bgr(Color.Red), 0);
         }

         //Draw the Delaunay triangulation
         foreach (Triangle2DF triangle in delaunayTriangles)
         {
            img.Draw(triangle, new Bgr(Color.White), 1);
         }

         //display the image
         ImageViewer.Show(img, "Planar Subdivision");
      }
Author: AnthonyNystrom, Project: Pikling, Lines: 52, Source: Program.cs

Example 4: DrawSet

        private static void DrawSet(Image<Bgr, Byte> table, Dictionary<Card, System.Drawing.Point> cards, Random rnd, List<Card> set)
        {
            Bgr setcolor = new Bgr(rnd.Next(255), rnd.Next(255), rnd.Next(255));
            List<System.Drawing.Point> centers = new List<System.Drawing.Point>();

            foreach (Card card in set)
            {
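                // circle the card and remember its center so the whole set can be outlined afterwards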
                System.Drawing.Point p = cards[card];
                PointF center = new PointF(p.X, p.Y);
                centers.Add(p);
                CircleF circle = new CircleF(center, 50);
                table.Draw(circle, setcolor, 2);
            }

            table.DrawPolyline(centers.ToArray(), true, setcolor, 5);
        }
Author: LoyVanBeek, Project: SetVision, Lines: 16, Source: Window1.xaml.cs

Example 5: DrawContours

        public static void DrawContours(ContourNode node, Image<Bgr, Byte> canvas, System.Drawing.Color color)
        {
            Bgr _color = new Bgr(color);   // use the caller-supplied color rather than hard-coding red

            foreach (ContourNode child in node.Children)
            {
                canvas.DrawPolyline(child.Contour.ToArray(), true, _color, 1);

                if (child.Shape != null)
                {
                    MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_PLAIN, 1, 1);
                    canvas.Draw(child.Shape + child.Color.ToString(),
                        ref font,
                        child.Contour[0],
                        new Bgr(System.Drawing.Color.Red)
                        );
                }

                DrawContours(child, canvas, color);
            }
        }
Author: LoyVanBeek, Project: SetVision, Lines: 21, Source: ContourAnalyzer.cs

Example 6: PreProcess

        public override IDataContainer PreProcess(IDataContainer dataContainer)
        {
            const int width = 1280;
            const int height = 720;
            var image = new Image<Rgb, byte>(width, height);

            foreach (var blob in dataContainer.OfType<BlobData>())
            {
                var polyline = new List<Point>();
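                // blob polygon points appear to be normalized to [0, 1]; scale them to pixel coordinates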
                foreach (var point in blob.Polygon.Points)
                {
                    var x = point.X * width;
                    var y = point.Y * height;

                    polyline.Add(new Point((int)x, (int)y));
                }

                var color = Rgbs.White;
                if (typeof(RectangleTracker) == blob.Source.GetType())
                    color = Rgbs.Red;
                else if (typeof(RectangleTrackerColor) == blob.Source.GetType())
                    color = Rgbs.Yellow;

                var centerX = (int)(blob.Center.X * width);
                var centerY = (int)(blob.Center.Y * height);

                image.DrawPolyline(polyline.ToArray(), true, color, 5);
                image.Draw(string.Format("Id {0}", blob.Id), ref EmguFontBig, new Point(centerX, centerY), Rgbs.White);
            }

            Stage(new RgbImageData(this, "BlobRenderer", image.Copy()));
            Push();

            image.Dispose();

            return null;
        }
Author: AlternateIf, Project: huddle-engine, Lines: 37, Source: BlobRenderer.cs

Example 7: Draw

        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Image<Bgr, Byte> Draw(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime)
        {
            HomographyMatrix homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;
            Matrix<byte> mask;

            FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, out indices, out mask, out homography);

            //Draw the matched keypoints
            Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);
            //ImageViewer.Show(modelImage, "modelImage");
            //ImageViewer.Show(observedImage, "observedImage");
            //ImageViewer.Show(result, "result");
            Image<Bgr, Byte> bgr = new Image<Bgr, byte>(observedImage.Bitmap);
            #region draw the projected region on the image
            if (homography != null)
            {
                //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)};
                homography.ProjectPoints(pts);

                result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Blue), 5);
                bgr.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Blue), 5);
                //ImageViewer.Show(bgr, "bgr");
            }
            #endregion

            return result;
        }
Author: Huong-nt, Project: SUFT-detecttion-EMGU, Lines: 43, Source: DrawMatches.cs

Example 8: Run

      static void Run()
      {
         Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png");

         #region extract features from the object image
         MCvSURFParams param1 = new MCvSURFParams(500, false);
         SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref param1);
         SURFFeature[] modelFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
         SURFFeature[] modelFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });

         //Create feature trees for the given features
         FeatureTree featureTreePositiveLaplacian = new FeatureTree(
            Array.ConvertAll<SURFFeature, Matrix<float>>(
               modelFeaturesPositiveLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; }));
         FeatureTree featureTreeNegativeLaplacian = new FeatureTree(
            Array.ConvertAll<SURFFeature, Matrix<float>>(
               modelFeaturesNegativeLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; }));
         #endregion

         Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");

         #region extract features from the observed image
         MCvSURFParams param2 = new MCvSURFParams(500, false);
         SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref param2);
         SURFFeature[] imageFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
         SURFFeature[] imageFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });
         #endregion

         #region Merge the object image and the observed image into one image for display
         Image<Gray, Byte> res = new Image<Gray, byte>(Math.Max(modelImage.Width, observedImage.Width), modelImage.Height + observedImage.Height);
         res.ROI = new System.Drawing.Rectangle(0, 0, modelImage.Width, modelImage.Height);
         modelImage.Copy(res, null);
         res.ROI = new System.Drawing.Rectangle(0, modelImage.Height, observedImage.Width, observedImage.Height);
         observedImage.Copy(res, null);
         res.ROI = Rectangle.Empty;
         #endregion

         double matchDistanceRatio = 0.8;
         List<PointF> modelPoints = new List<PointF>();
         List<PointF> observePoints = new List<PointF>();

         #region using Feature Tree to match feature
         Matrix<float>[] imageFeatureDescriptorsPositiveLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
            imageFeaturesPositiveLaplacian,
            delegate(SURFFeature f) { return f.Descriptor; });
         Matrix<float>[] imageFeatureDescriptorsNegativeLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
            imageFeaturesNegativeLaplacian,
            delegate(SURFFeature f) { return f.Descriptor; });
         Matrix<Int32> result1;
         Matrix<double> dist1;

         featureTreePositiveLaplacian.FindFeatures(imageFeatureDescriptorsPositiveLaplacian, out result1, out dist1, 2, 20);
         MatchSURFFeatureWithFeatureTree(
           modelFeaturesPositiveLaplacian,
           imageFeaturesPositiveLaplacian,
           matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);

         featureTreeNegativeLaplacian.FindFeatures(imageFeatureDescriptorsNegativeLaplacian, out result1, out dist1, 2, 20);
         MatchSURFFeatureWithFeatureTree(
              modelFeaturesNegativeLaplacian,
              imageFeaturesNegativeLaplacian,
              matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);
         #endregion

         Matrix<float> homographyMatrix = CameraCalibration.FindHomography(
            modelPoints.ToArray(), //points on the object image
            observePoints.ToArray(), //points on the observed image
            HOMOGRAPHY_METHOD.RANSAC,
            3).Convert<float>();

         #region draw the projected object in observed image
         for (int i = 0; i < modelPoints.Count; i++)
         {
            PointF p = observePoints[i];
            p.Y += modelImage.Height;
            res.Draw(new LineSegment2DF(modelPoints[i], p), new Gray(0), 1);
         }

         System.Drawing.Rectangle rect = modelImage.ROI;
         Matrix<float> originalCornerCoordinate = new Matrix<float>(new float[,]
            {{  rect.Left, rect.Bottom, 1.0f},
               { rect.Right, rect.Bottom, 1.0f},
               { rect.Right, rect.Top, 1.0f},
               { rect.Left, rect.Top, 1.0f}});

         Matrix<float> destCornerCoordinate = homographyMatrix * originalCornerCoordinate.Transpose();
         float[,] destCornerCoordinateArray = destCornerCoordinate.Data;

         Point[] destCornerPoints = new Point[4];
         for (int i = 0; i < destCornerPoints.Length; i++)
         {
            float denominator = destCornerCoordinateArray[2, i];
            destCornerPoints[i] = new Point(
               (int)(destCornerCoordinateArray[0, i] / denominator),
               (int)(destCornerCoordinateArray[1, i] / denominator) + modelImage.Height);
         }

         res.DrawPolyline(destCornerPoints, true, new Gray(255.0), 5);
//......... (remainder of this method omitted) .........
Author: AnthonyNystrom, Project: Pikling, Lines: 101, Source: SURFFeature.cs

Example 9: PickupLargestArea

 private Contour<Point> PickupLargestArea(ref Image<Gray, byte> src)
 {
     Contour<Point> contour = src.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_EXTERNAL);
     if (contour == null)
     {
         return null;
     }
     double max = 0;
     Contour<Point> largest = contour;
     //find the contour with the largest area
     while (contour != null)
     {
         if (contour.Area > max)
         {
             max = contour.Area;
             largest = contour;
         }
         contour = contour.HNext;
     }

     //keep only the largest region in src
     Contour<Point> approx = largest.ApproxPoly(13);
     src.SetZero();
     src.DrawPolyline(approx.ToArray(), true, new Gray(255), -1);
     return approx;
 }
Author: jgmanz, Project: asial-arbeit, Lines: 26, Source: Form1.cs

Example 10: CalculateConvexityDefacts

        private Image<Gray, Byte> CalculateConvexityDefacts(Image<Gray, Byte> image)
        {
            Gray cannyThreshold = new Gray(80);
            Gray cannyThresholdLinking = new Gray(80);

            //image = image.Canny(cannyThreshold, cannyThresholdLinking);
            image = image.ThresholdBinary(cannyThreshold, cannyThresholdLinking);
            //image = image.Erode(1);
            //image = image.SmoothBilatral(1, 1, 1);
            //image = image.SmoothMedian(5);
            //image = image.SmoothBlur(1,1);
            using (MemStorage storage = new MemStorage())
            {

                Contour<Point> contours = image.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
                Contour<Point> biggestContour = null;

                Double Result1 = 0;
                Double Result2 = 0;

                //take the biggest contour to track (not strictly necessary if only the hand is painted)
                while (contours != null)
                {
                    Result1 = contours.Area;
                    if (Result1 > Result2)
                    {
                        Result2 = Result1;
                        biggestContour = contours;
                    }
                    contours = contours.HNext;
                }

                if (biggestContour != null)
                {
                    //Drawing the contour of the hand
                    Contour<Point> currentContour = biggestContour.ApproxPoly(biggestContour.Perimeter * 0.0025, storage);
                    image.Draw(currentContour, new Gray(250), 1);

                    biggestContour = currentContour;

                    hull = biggestContour.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                    box = biggestContour.GetMinAreaRect();

                    PointF[] points = box.GetVertices();
                    Point[] ps = new Point[points.Length];

                    for (int i = 0; i < points.Length; i++)
                    {
                        ps[i] = new Point((int)points[i].X, (int)points[i].Y);
                    }

                    image.DrawPolyline(hull.ToArray(), true, new Gray(255), 1);
                    image.Draw(new CircleF(new PointF(box.center.X, box.center.Y), 2), new Gray(255), 1);

                    filteredHull = new Seq<Point>(storage);
                    for (int i = 0; i < hull.Total; i++)
                    {
                        // wrap around so the last hull point is compared with the first
                        Point next = hull[(i + 1) % hull.Total];
                        if (Math.Sqrt(Math.Pow(hull[i].X - next.X, 2) + Math.Pow(hull[i].Y - next.Y, 2)) > box.size.Width / 10)
                        {
                            filteredHull.Push(hull[i]);
                        }
                    }

                    defects = biggestContour.GetConvexityDefacts(storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                    defectArray = defects.ToArray();
                }
            }
            return image;
        }
Author: faddison, Project: KMouse, Lines: 70, Source: MainWindow.cs

Example 11: DrawUnusedRobotPieces

		internal void DrawUnusedRobotPieces(List<BlobInfo> blobInfos)
		{
			m_DetectedBlobsImage = m_GrayImage.CopyBlank();

			int width = 0;
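			// each successive blob outline is drawn one pixel thicker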
			foreach (BlobInfo blobInfo in blobInfos)
			{
				width++;
				PointF[] pointsF = blobInfo.MinAreaRect.GetVertices();
				Point[] points = new Point[pointsF.Length];

				for (int i = 0; i < points.Length; i++)
				{
					points[i] = new Point(
						(int)pointsF[i].X,
						(int)pointsF[i].Y);
				}

				m_DetectedBlobsImage.DrawPolyline(points, true, new Gray(255), width);
			}
		}
Author: eldb2, Project: robotic-tic-tac-toe-lynxmotion, Lines: 21, Source: ImageProcessor.cs

Example 12: FindBlobsAndDraw

		private void FindBlobsAndDraw(Image<Gray, Byte> blackAndWhiteImage)
		{
			m_BlobInfos.Clear();
			m_DetectedBlobsImage = m_ClippedImage.CopyBlank();

			using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
			{
				int width = 0;
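				// outline thickness grows by one pixel for each contour found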
				for (Contour<Point> contours = blackAndWhiteImage.FindContours(
					Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
					Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
					storage);
					contours != null;
					contours = contours.HNext)
				{
					Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
					//Debug.WriteLine(currentContour.Area);
					m_BlobInfos.Add(new BlobInfo(currentContour));

					width++;
					m_DetectedBlobsImage.DrawPolyline(currentContour.ToArray(), true, new Bgr(Color.White), width);
				}
			}
		}
Author: eldb2, Project: robotic-tic-tac-toe-lynxmotion, Lines: 24, Source: MainFormModel.cs

Example 13: TestConvexHull

        public void TestConvexHull()
        {
            #region Create some random points
            Random r = new Random();
            PointF[] pts = new PointF[200];
            for (int i = 0; i < pts.Length; i++)
            {
                pts[i] = new PointF((float)(100 + r.NextDouble() * 400), (float)(100 + r.NextDouble() * 400));
            }
            #endregion

            Image<Bgr, Byte> img = new Image<Bgr, byte>(600, 600, new Bgr(255.0, 255.0, 255.0));
            //Draw the points
            foreach (PointF p in pts)
                img.Draw(new CircleF(p, 3), new Bgr(0.0, 0.0, 0.0), 1);

            //Find and draw the convex hull
            using (MemStorage storage = new MemStorage())
            {
                Stopwatch watch = Stopwatch.StartNew();
                PointF[] hull = PointCollection.ConvexHull(pts, storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE).ToArray();
                watch.Stop();
                img.DrawPolyline(
                    Array.ConvertAll<PointF, Point>(hull, Point.Round),
                    true, new Bgr(255.0, 0.0, 0.0), 1);

                //ImageViewer.Show(img, String.Format("Convex Hull Computed in {0} milliseconds", watch.ElapsedMilliseconds));
            }
        }
Author: samuto, Project: UnityOpenCV, Lines: 29, Source: AutoTestVarious.cs

Example 14: MainLoop

        private void MainLoop()
        {
            CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
            HandImage = new Image<Gray, byte>(CurrentFrame.Size);
            while (!IsDisposed)
            {
                CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
                HandImage.SetZero();

                //extract the skin-colored region
                ExtractSkinColor(CurrentFrame, ref HandImage);

                //remove noise; Erode and Dilate return new images, so reassign the result
                HandImage = HandImage.Erode(20);
                HandImage = HandImage.Dilate(20);

                imageBox2.Image = HandImage;

                //extract the hand contour and classify the rock-paper-scissors gesture
                Contour<Point> hand_contour = PickupLargestArea(ref HandImage);
                Hands hand = DecideHandFromDefact(hand_contour);
                string msg = "";
                switch (hand)
                {
                    case Hands.PAPER:
                        msg = "Paper";
                        break;
                    case Hands.ROCK:
                        msg = "Rock";
                        break;
                    case Hands.SCISSORS:
                        msg = "Scissors";
                        break;
                    case Hands.UNKNOWN:
                        msg = "Unknown...";
                        break;
                }

                this.Invoke(new MethodInvoker(delegate() {
                    if (!this.IsDisposed) {
                            textBox_Msg.Text = msg;
                            UpdateParams();
                    }
                }));

                if (hand_contour == null)
                {
                    continue;
                }
                CurrentFrame.DrawPolyline(hand_contour.ToArray(), true, new Hsv(255, 255, 255), 2);
                CurrentFrame.DrawPolyline(hand_contour.GetConvexHull(ORIENTATION.CV_CLOCKWISE).ToArray(), true, new Hsv(50, 100, 50), 1);
                imageBox1.Image = CurrentFrame;

            }
        }
Author: jgmanz, Project: asial-arbeit, Lines: 55, Source: Form1.cs

Example 15: DrawBlobs

		public void DrawBlobs(Image<Bgr, byte> image,
				BlobObject[] blobs,
				bool fill,
				bool drawBoundingBox, Color BoundingBoxColor,
				bool drawConvexHull, Color ConvexHullColor,
				bool drawEllipse, Color EllipseColor,
				bool drawCentroid, Color CentroidColor,
				bool drawAngle, Color AngleColor)
		{
			Random r = new Random(0);
			foreach (var b in blobs)
			{
				if (fill)
					b.FillBlob(image.Ptr, new MCvScalar(r.Next(255), r.Next(255), r.Next(255), r.Next(255)));
				if (drawBoundingBox)
					image.Draw(b.BoundingBox, new Bgr(BoundingBoxColor), 1);
				if (drawConvexHull)
					image.DrawPolyline(b.ConvexHull, true, new Bgr(ConvexHullColor), 1);
				if (drawEllipse)
					image.Draw(b.BestFitEllipse, new Bgr(EllipseColor), 1);

				if (drawCentroid)
				{
					image.Draw(new LineSegment2D(new Point((int)b.CentroidX - 4, (int)b.CentroidY),
										   new Point((int)b.CentroidX + 4, (int)b.CentroidY)),
						 new Bgr(CentroidColor), 1);
					image.Draw(new LineSegment2D(new Point((int)b.CentroidX, (int)b.CentroidY - 4),
										   new Point((int)b.CentroidX, (int)b.CentroidY + 4)),
						 new Bgr(CentroidColor), 1);
				}
				if (drawAngle)
				{
					double x1, x2, y1, y2;
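					// endpoints of a line through the centroid along the blob's orientation angle, with length scaled by blob area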
					x1 = b.CentroidX - 0.005 * b.Area * Math.Cos(b.Angle);
					y1 = b.CentroidY - 0.005 * b.Area * Math.Sin(b.Angle);
					x2 = b.CentroidX + 0.005 * b.Area * Math.Cos(b.Angle);
					y2 = b.CentroidY + 0.005 * b.Area * Math.Sin(b.Angle);
					image.Draw(new LineSegment2D(new Point((int)x1, (int)y1),
									new Point((int)x2, (int)y2)),
						 new Bgr(AngleColor), 1);
				}
			}
		}
Author: vegazrelli, Project: GazeTracker2.0, Lines: 43, Source: BlobResult.cs


Note: The Image.DrawPolyline method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright belongs to the original authors, and any use or redistribution must follow the corresponding project's license. Please do not reproduce without permission.