

C# Image.FillConvexPoly Method Code Examples

This article collects and summarizes typical usage examples of the Image.FillConvexPoly method in C#. If you have been wondering exactly how to call C# Image.FillConvexPoly, how to use it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the Image class, to which the method belongs.


The sections below present 15 code examples of the Image.FillConvexPoly method, sorted by popularity by default.
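
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the call pattern they all share: construct an Image<TColor, TDepth>, describe a convex polygon as a Point[] listed in order around its boundary, and pass it to FillConvexPoly together with a fill color. The image size, coordinates, and output file name are illustrative only.

using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;

class FillConvexPolyDemo
{
    static void Main()
    {
        // A 200x200 blue canvas (Bgr channel order, byte depth).
        var img = new Image<Bgr, byte>(200, 200, new Bgr(255, 0, 0));

        // Vertices of a convex polygon, listed in order around its boundary.
        Point[] triangle =
        {
            new Point(100, 30),
            new Point(170, 160),
            new Point(30, 160)
        };

        // Fill the polygon in green; the point array must describe a convex,
        // non-self-intersecting shape.
        img.FillConvexPoly(triangle, new Bgr(Color.Green));

        img.Save("filled-triangle.png"); // illustrative output path
    }
}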

Example 1: VerticalProject

 public static Image<Gray, Byte> VerticalProject(Image<Gray, Byte> img)
 {
     int[] counter = new int[img.Width];
     for (int i = 0; i < img.Height; ++i) {
         for (int j = 0; j < img.Width; ++j) {
             if (img[i, j].Equals(ColorDefination.ForeColor)) {
                 ++counter[j];
             }
         }
     }
     int max = counter.Max(), height = max * Scale;
     Image<Gray, Byte> result = new Image<Gray, byte>(img.Width, height + 1);
     for (int i = 0; i < counter.Length; ++i) {
         int h = counter[i] * Scale;
         if (h == 0) continue;
         Point[] points = new Point[] {
             // vertices listed in order around the 1-pixel-wide bar
             new Point(i, height - h),
             new Point(i + 1, height - h),
             new Point(i + 1, height),
             new Point(i, height)
         };
         result.FillConvexPoly(points, new Gray(255));
     }
     return result;
 }
Developer: pakerliu, Project: sharp-context, Lines of code: 25, Source: Projecting.cs
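
A hedged usage sketch for the helper above: it assumes the method lives in a static class named Projecting (matching the source file name) and that the Scale factor and ColorDefination.ForeColor it relies on are defined elsewhere in the sharp-context project. The input file name and threshold value are illustrative only.

// Hypothetical caller: binarize an input image, then render its
// column-wise (vertical) projection histogram with the method above.
Image<Gray, Byte> binary = new Image<Gray, Byte>("characters.png")
    .ThresholdBinary(new Gray(128), new Gray(255));
Image<Gray, Byte> histogram = Projecting.VerticalProject(binary);
histogram.Save("vertical-projection.png");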

Example 2: FloodFillTest

        public void FloodFillTest()
        {
            // Create a Square
            Point[] square = new Point[4];
            square[0] = new Point(25, 25);
            square[1] = new Point(75, 25);
            square[2] = new Point(75, 75);
            square[3] = new Point(25, 75);

            // Create an Original Image
            var original = new Image<Bgr, Byte>(100, 100, new Bgr(255, 0, 0));
            original.FillConvexPoly(square, new Bgr(Color.Green));

            // Create an Expected Output Image
            var expected = new Image<Bgr, Byte>(100, 100, new Bgr(Preprocessing.MaskColor));
            expected.FillConvexPoly(square, new Bgr(Color.Green));

            // Perform the Flood fill
            Console.WriteLine("Perform Flood Fill ... ");
            var actual = new Image<Bgr, Byte>(Preprocessing.FloodFill(original.ToBitmap(), 0, 0, 1, new Bgr(255, 0, 0)));

            bool identical = true;
            for (int ii = 0; ii < expected.Width; ii++)
            {
                for (int jj = 0; jj < expected.Height; jj++)
                {
                    identical = identical && (Utility.IsEqual(expected[jj, ii], actual[jj, ii]));
                }
            }

            Assert.IsTrue(identical);
        }
Developer: Algorithmix, Project: Papyrus, Lines of code: 32, Source: PreprocessingTest.cs

Example 3: RepresentativeLuminousityTest

        public void RepresentativeLuminousityTest()
        {
            const int imgSize = 100;
            // Create a Square
            Point[] square = new Point[4];
            square[0] = new Point(25, 25);
            square[1] = new Point(75, 25);
            square[2] = new Point(75, 75);
            square[3] = new Point(25, 75);

            var backgroundColor = new Bgra(0, 0, 0, 0);
            var foregroundColor = new Bgra(255, 255, 255, 255);

            // Create an Original Image
            var original = new Image<Bgra, Byte>(imgSize, imgSize, backgroundColor);
            original.FillConvexPoly(square, foregroundColor);

            // Create an Expected output array
            var expected = new double[imgSize];
            for (int ii = 0; ii < imgSize; ii++)
            {
                if (ii >= 25 && ii <= 75)
                {
                    expected[ii] = Luminousity.Luma(foregroundColor);
                }
                else
                {
                    expected[ii] = Utility.Defaults.Ignore;
                }
            }

            // Scan from the left, right, top and bottom
            var actualLeft = Luminousity.RepresentativeLuminousity(original, 1, 5, Direction.FromLeft);
            var actualRight = Luminousity.RepresentativeLuminousity(original, 1, 5, Direction.FromRight);
            var actualTop = Luminousity.RepresentativeLuminousity(original, 1, 5, Direction.FromTop);
            var actualBottom = Luminousity.RepresentativeLuminousity(original, 1, 5, Direction.FromBottom);

            // Check that lengths match
            Assert.IsTrue(actualBottom.Length == expected.Length);
            Assert.IsTrue(actualLeft.Length == expected.Length);
            Assert.IsTrue(actualRight.Length == expected.Length);
            Assert.IsTrue(actualTop.Length == expected.Length);

            // Check that the values match
            for (int ii = 0; ii < expected.Length; ii++)
            {
                Assert.IsTrue(Math.Abs(actualBottom[ii] - expected[ii]) < 0.001);
                Assert.IsTrue(Math.Abs(actualLeft[ii] - expected[ii]) < 0.001);
                Assert.IsTrue(Math.Abs(actualRight[ii] - expected[ii]) < 0.001);
                Assert.IsTrue(Math.Abs(actualTop[ii] - expected[ii]) < 0.001);
            }
            Console.WriteLine("Luminousity Scanning Tests Succesful!");
        }
Developer: Algorithmix, Project: Papyrus, Lines of code: 53, Source: LuminousityTest.cs

Example 4: Run

      static void Run()
      {
         float maxValue = 600;

         #region create random points in the range of [0, maxValue]
         PointF[] pts = new PointF[20];
         Random r = new Random((int)(DateTime.Now.Ticks & 0x0000ffff));
         for (int i = 0; i < pts.Length; i++)
            pts[i] = new PointF((float)r.NextDouble() * maxValue, (float)r.NextDouble() * maxValue);
         #endregion

         Triangle2DF[] delaunayTriangles;
         VoronoiFacet[] voronoiFacets;
         using (PlanarSubdivision subdivision = new PlanarSubdivision(pts))
         {
            //Obtain the Delaunay triangulation from the set of points
            delaunayTriangles = subdivision.GetDelaunayTriangles();

            //Obtain the voronoi facets from the set of points
            voronoiFacets = subdivision.GetVoronoiFacets();
         }

         //create an image for display purpose
         Image<Bgr, Byte> img = new Image<Bgr, byte>((int)maxValue, (int) maxValue);

         //Draw the voronoi Facets
         foreach (VoronoiFacet facet in voronoiFacets)
         {
            Point[] points = Array.ConvertAll<PointF, Point>(facet.Vertices, Point.Round);

            //Draw the facet in color
            img.FillConvexPoly(
                points,
                new Bgr(r.NextDouble() * 120, r.NextDouble() * 120, r.NextDouble() * 120)
                );

            //highlight the edge of the facet in black
            img.DrawPolyline(points, true, new Bgr(Color.Black), 2);

            //draw the points associated with each facet in red
            img.Draw(new CircleF(facet.Point, 5.0f), new Bgr(Color.Red), 0);
         }

         //Draw the Delaunay triangulation
         foreach (Triangle2DF triangles in delaunayTriangles)
         {
            img.Draw(triangles, new Bgr(Color.White), 1);
         }

         //display the image
         ImageViewer.Show(img, "Planar Subdivision");
      }
Developer: AnthonyNystrom, Project: Pikling, Lines of code: 52, Source: Program.cs

Example 5: DetectBackgroundTest

        public void DetectBackgroundTest()
        {
            // Create a Square
            Point[] shred = new Point[4];
            shred[0] = new Point(0,0);
            shred[1] = new Point(0, 99);
            shred[2] = new Point(10, 99);
            shred[3] = new Point(10, 0);

            // Create an Original Image
            var original = new Image<Bgr, Byte>(100, 100, new Bgr(Color.HotPink));
            original.FillConvexPoly(shred, new Bgr(Color.Gray));

            var expected = new Bgr(Color.HotPink);

            Console.WriteLine("Performing Heuristic Background Detection");
            var actual = Heuristics.DetectBackground(original.ToBitmap());
            Assert.IsTrue( Picasso.Utility.IsEqual(expected,actual) );
        }
Developer: Algorithmix, Project: Picasso, Lines of code: 19, Source: HeuristicsTest.cs

Example 6: DisplayTestShred

        public static void DisplayTestShred()
        {
            //Create original Square
            Point[] patch1 = new Point[4];
            patch1[0] = new Point(0, 0);
            patch1[1] = new Point(0, 10);
            patch1[2] = new Point(99, 10);
            patch1[3] = new Point(99, 0);

            Point[] patch2 = new Point[4];
            patch2[0] = new Point(0, 50);
            patch2[1] = new Point(0, 60);
            patch2[2] = new Point(99, 60);
            patch2[3] = new Point(99, 50);

            // Create a Tester Square to compare
            Point[] patch3 = new Point[4];
            patch3[0] = new Point(0, 110);
            patch3[1] = new Point(0, 120);
            patch3[2] = new Point(99, 120);
            patch3[3] = new Point(99, 110);

            Point[] patch4 = new Point[4];
            patch4[0] = new Point(0, 160);
            patch4[1] = new Point(0, 170);
            patch4[2] = new Point(99, 170);
            patch4[3] = new Point(99, 160);

            // Create an Original Image
            var original = new Image<Bgr, Byte>(100, 100, new Bgr(Color.HotPink));
            original.FillConvexPoly(patch1, new Bgr(Color.Gray));
            original.FillConvexPoly(patch2, new Bgr(Color.Gray));

            //Create Image to compare with
            var tester = new Image<Bgr, Byte>(100, 200, new Bgr(Color.HotPink));
            tester.FillConvexPoly(patch3, new Bgr(Color.Gray));
            tester.FillConvexPoly(patch4, new Bgr(Color.Gray));

            //ImageViewer display = new ImageViewer(original, "TestBitmap");
            //display.ShowDialog();
            //ImageViewer display2 = new ImageViewer(tester, "Test Image");
            //display2.ShowDialog();

            const string filepath = "originalshrd.bmp";
            const string filepath2 = "testshred.bmp";

            if (File.Exists(filepath))
            {
                File.Delete(filepath);
            }

            if (File.Exists(filepath2))
            {
                File.Delete(filepath2);
            }
            original.ToBitmap().Save(filepath);
            tester.ToBitmap().Save(filepath2);
            Shred originalshred = new Shred(filepath);
            Shred testershred = new Shred(filepath2);
            originalshred.VisualizeLuminousity(Direction.FromLeft);
            originalshred.VisualizeThresholded(Direction.FromLeft);
            originalshred.VisualizeChamfers(Direction.FromLeft);
            testershred.VisualizeThresholded(Direction.FromLeft);
            testershred.VisualizeLuminousity(Direction.FromLeft);
            testershred.VisualizeChamfers(Direction.FromRight);
        }
Developer: Algorithmix, Project: Papyrus, Lines of code: 66, Source: ArtificialShredSample.cs

Example 7: ProcessFrame

        private void ProcessFrame(object sender, EventArgs arg)
        {                   
            if(imageFrame != null){
                
                Image<Gray, byte> greyImage = imageFrame.Copy().Convert<Gray, byte>();
                greyImage = greyImage.SmoothMedian(3);
                
                //stopWatch.Start();
                var faces = face.Detect(greyImage,1.3,6,HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT,new Size(120,120),new Size(300,300));
                
                if (faces.Length == 0)
                {
                    var eyeObjects = eyeWithGlass.DetectMultiScale(greyImage, 1.3, 6, minEye, maxEye);
                    if(eyeObjects.Length == 2)
                    {
                        #region comment
                       /*Console.WriteLine("helper");
                        if(eyeObjects[0].X > eyeObjects[1].X)
                        {
                            var temp = eyeObjects[0];
                            eyeObjects[0] = eyeObjects[1];
                            eyeObjects[1] = temp;
                        }
                        int betweeneLength = eyeObjects[1].X - eyeObjects[0].X;
                        int middleposition = eyeObjects[0].X + ((betweeneLength + eyeObjects[1].Width )/ 2);
                        int forheadpeak = (int)(0.8 * betweeneLength);//
                        int forheadpeakpeak = (int)(0.7 * betweeneLength);//
                        int forheadbelowpeak = (int)(0.4 * betweeneLength);
                        int foreheadpoint = (int)(0.6 * betweeneLength);
                        int neareyebrowpoint = (int)(0.2 * betweeneLength);
                        int lefteyebrowpoint = eyeObjects[0].X;//
                        int righteyebrowpoint = eyeObjects[0].X + betweeneLength + eyeObjects[1].Width;//
                        //int nosepoint =
                        int xx = (int)((5.0 / 12.0) * betweeneLength);
                        int margin = (int)((1.5 / 8.0) * betweeneLength);

                        int x1 = (int)((1.0 / 16.0) * betweeneLength);

                        
                        int round = 3;
                        int around = round-2;
                        double tempcolor = 0;
                        for (int i = 0; i<round; i++)
                        {
                            for (int j = 0; j < round; j++)
                            {
                                tempcolor += greyImage[middleposition - around + i, eyeObjects[0].Y + forheadpeak - around + j].Intensity;
                            }
                        }
                        Gray skincolor = new Gray(tempcolor/(round*round));
                        Point[] p = new Point[7];
                        p[0] = new Point(middleposition, eyeObjects[0].Y - forheadpeak);

                        p[1] = new Point(eyeObjects[0].X + (eyeObjects[0].Width / 2), eyeObjects[0].Y - forheadpeakpeak);
                        p[2] = new Point(eyeObjects[0].X - x1, eyeObjects[0].Y - forheadbelowpeak);
                        p[3] = new Point(lefteyebrowpoint - margin, eyeObjects[0].Y + (eyeObjects[0].Height/6));

                        p[4] = new Point(righteyebrowpoint + margin, eyeObjects[0].Y + (eyeObjects[0].Height /6));
                        p[5] = new Point(righteyebrowpoint + x1, eyeObjects[0].Y - forheadbelowpeak);
                        p[6] = new Point(eyeObjects[1].X + (eyeObjects[1].Width / 2), eyeObjects[0].Y - forheadpeakpeak);

                        //imageFrame.Draw(new Rectangle(new Point(eyeObjects[0].X, eyeObjects[0].Y), new Size(betweeneLength + eyeObjects[1].Width, eyeObjects[0].Height)), new Bgr(Color.Aqua), 2);
                        //imageFrame.Draw(new CircleF(new PointF(middleposition,eyeObjects[0].Y+ foreheadpoint), 1), new Bgr(Color.Yellow), 2);
                        //imageFrame.Draw(new CircleF(new PointF(middleposition,eyeObjects[0].Y - forheadpeak), 1), new Bgr(Color.Yellow), 2);
                        //imageFrame.Draw(new CircleF(new PointF(middleposition, eyeObjects[0].Y - neareyebrowpoint), 1), new Bgr(Color.Gold), 2);
                        //imageFrame.Draw(new CircleF(new PointF(lefteyebrowpoint - margin, eyeObjects[0].Y), 1), new Bgr(Color.AliceBlue), 2);
                        //imageFrame.Draw(new CircleF(new PointF(righteyebrowpoint + margin, eyeObjects[0].Y), 1), new Bgr(Color.AliceBlue), 2);
                        //imageFrame.Draw(new CircleF(new PointF(lefteyebrowpoint, eyeObjects[0].Y - neareyebrowpoint), 1), new Bgr(Color.LimeGreen), 2);
                        //imageFrame.Draw(new CircleF(new PointF(righteyebrowpoint, eyeObjects[0].Y - neareyebrowpoint), 1), new Bgr(Color.LimeGreen), 2);
                        //imageFrame.DrawPolyline(p,true, new Bgr(Color.Azure), 2);
                        greyImage.FillConvexPoly(p, skincolor);
                        //imageFrame.Draw(new CircleF(new PointF(eyeObjects[0].X - x1, eyeObjects[0].Y - forheadbelowpeak), 1), new Bgr(Color.LimeGreen), 2);
                        //imageFrame.Draw(new CircleF(new PointF(righteyebrowpoint + x1, eyeObjects[0].Y - forheadbelowpeak), 1), new Bgr(Color.LimeGreen), 2);

                        //imageFrame.Draw(new CircleF(new PointF(eyeObjects[0].X + (eyeObjects[0].Width / 2), eyeObjects[0].Y - forheadpeakpeak), 1), new Bgr(Color.LimeGreen), 2);
                        //imageFrame.Draw(new CircleF(new PointF(eyeObjects[1].X + (eyeObjects[1].Width / 2), eyeObjects[0].Y - forheadpeakpeak), 1), new Bgr(Color.LimeGreen), 2);
                        */
                        #endregion
                    }
                    else // no eyes detected in the frame
                    {
                        
                        learningTag = true;
                        name = "Processing...";
                        userid = "Processing...";
                        timestampFlag = true;
                        faceRectangle = Rectangle.Empty;
                        realfaceRectangle = Rectangle.Empty;
                        label2.ForeColor = Color.DeepSkyBlue;
                        label2.Text = "Idle";
                        progressBar1.Value = 0;
                        recogNameResult.Clear();
                        recogDistanceResult.Clear();
                        Console.WriteLine("Clear"); 
                    }
                    
                    ARDisplayFlag = false;
                    
                    
                }
//......... part of the code omitted here .........
Developer: pathom2000, Project: Face-Rcognition-with-Augmented-Reality, Lines of code: 101, Source: Form1.cs

Example 8: ShredChamferSimilarityTest

        public void ShredChamferSimilarityTest()
        {
            //Create original Square
            Point[] patch1 = new Point[4];
            patch1[0] = new Point(0, 0);
            patch1[1] = new Point(0, 10);
            patch1[2] = new Point(99, 10);
            patch1[3] = new Point(99, 0);

            Point[] patch2 = new Point[4];
            patch2[0] = new Point(0, 50);
            patch2[1] = new Point(0, 60);
            patch2[2] = new Point(99, 60);
            patch2[3] = new Point(99, 50);

            // Create a Tester Square to compare
            Point[] patch3 = new Point[4];
            patch3[0] = new Point(0, 100);
            patch3[1] = new Point(0, 110);
            patch3[2] = new Point(99, 110);
            patch3[3] = new Point(99, 100);

            Point[] patch4 = new Point[4];
            patch4[0] = new Point(0, 150);
            patch4[1] = new Point(0, 160);
            patch4[2] = new Point(99, 160);
            patch4[3] = new Point(99, 150);

            // Create an Original Image
            var original = new Image<Bgr, Byte>(100, 100, new Bgr(Color.HotPink));
            original.FillConvexPoly(patch1, new Bgr(Color.Gray));
            original.FillConvexPoly(patch2, new Bgr(Color.Gray));

            //Create Image to compare with
            var tester = new Image<Bgr, Byte>(100, 200, new Bgr(Color.HotPink));
            tester.FillConvexPoly(patch3, new Bgr(Color.Gray));
            tester.FillConvexPoly(patch4, new Bgr(Color.Gray));

            const string filepath = "originalshrd.bmp";
            const string filepath2 = "testshred.bmp";

            // Delete Shred Files
            if (File.Exists(filepath))
            {
                File.Delete(filepath);
            }

            if (File.Exists(filepath2))
            {
                File.Delete(filepath2);
            }

            // Save bitmaps to load as shreds
            original.ToBitmap().Save(filepath);
            tester.ToBitmap().Save(filepath2);

            // Create new shreds
            Shred originalshred = new Shred(filepath);
            Shred testershred = new Shred(filepath2);

            // Run Similarity test
            var actual = MatchData.CompareShred(
                originalshred,
                testershred,
                Direction.FromLeft,
                Orientation.Regular,
                Direction.FromRight,
                Orientation.Regular
                ).Offset;

            const int expected = 100;
            Assert.IsTrue(actual == expected);
        }
Developer: Algorithmix, Project: Papyrus, Lines of code: 73, Source: ShredTest.cs

Example 9: ShredSerializingTest

        public void ShredSerializingTest()
        {
            // Create a Square
            Point[] patch1 = new Point[4];
            patch1[0] = new Point(0, 0);
            patch1[1] = new Point(0, 10);
            patch1[2] = new Point(99, 10);
            patch1[3] = new Point(99, 0);

            Point[] patch2 = new Point[4];
            patch2[0] = new Point(0, 50);
            patch2[1] = new Point(0, 60);
            patch2[2] = new Point(99, 60);
            patch2[3] = new Point(99, 50);

            // Create an Original Image
            var original = new Image<Bgr, Byte>(100, 100, new Bgr(Color.HotPink));
            original.FillConvexPoly(patch1, new Bgr(Color.Gray));
            original.FillConvexPoly(patch2, new Bgr(Color.Gray));

            // Ensure filepaths are clear for writing files
            const string filepath = "shredtest.bmp";
            const string serializedpath = "test.shred";

            if (File.Exists(filepath))
            {
                File.Delete(filepath);
            }

            original.ToBitmap().Save(filepath);
            Assert.IsTrue(File.Exists(filepath));
            Shred myshred = new Shred(filepath);

            if (File.Exists(serializedpath))
            {
                File.Delete(serializedpath);
            }

            // Save and load shred
            Assert.IsFalse(File.Exists(serializedpath));
            Shred.Save(myshred, serializedpath);
            Assert.IsTrue(File.Exists(serializedpath));

            Shred newshred = Shred.Load(serializedpath);
            Assert.IsTrue(newshred.Sparsity[(int) Direction.FromLeft] == myshred.Sparsity[(int) Direction.FromLeft]);
        }
Developer: Algorithmix, Project: Papyrus, Lines of code: 46, Source: ShredTest.cs

Example 10: button2_Click

        private void button2_Click(object sender, EventArgs e)
        {
            string dateTemp = dateTimePicker1.Value.ToString("s");


            if (mydb.checkUserProfile(textBox1.Text, textBox2.Text))
            {
                mydb.InsertUserData(textBox1.Text, textBox2.Text, dateTemp, comboBox1.Text, comboBox2.Text);
            }

            newid = mydb.getUserId(textBox1.Text, textBox2.Text, dateTemp, comboBox1.Text);
            if (newid != 0)
            {
                Image<Gray, byte> darkimage = new Image<Gray, byte>(ROIwidth, ROIheight);
                Image<Gray, byte> cropimage;
                Image<Gray, byte> plainimage = new Image<Gray, byte>(ROIwidth, ROIheight);
                for (int i = 1;i<71 ;i++ )
                {
                    //cropimage = new Image<Gray, byte>(@"E:\ImageTestSet2\" + textBox1.Text + i + ".jpg");
                    cropimage = new Image<Gray, byte>(@"E:\ImageTestSet2\Perapat" + i + ".jpg");
                    if (!cropimage.Equals(darkimage))
                    {
                        

                        
                        plainimage = cropimage.Copy();
                        Point[] pL = new Point[3];
                        Point[] pR = new Point[3];
                        int y0 = 105;
                        int y1 = 174;
                        int x0 = 0;
                        int x1 = 34;
                        int x2 = 105;
                        int x3 = 139;
                        pL[0] = new Point(x0, y0);
                        pL[1] = new Point(x0, y1);
                        pL[2] = new Point(x1, y1);
                        pR[0] = new Point(x3, y0);
                        pR[1] = new Point(x3, y1);
                        pR[2] = new Point(x2, y1);
                        cropimage.FillConvexPoly(pL, new Gray(128));
                        cropimage.FillConvexPoly(pR, new Gray(128));
                        //cropimage = cropimage.SmoothMedian(3);

                        imageBox7.Image = cropimage;     //line 2
                        cropimage.Save(folderPath + tempPath);
                        string dbPath = (folderPath + tempPath).Replace("\\", "/");
                        plainimage.Save(folderPath + tempPath2);
                        string dbPathPlain = (folderPath + tempPath2).Replace("\\", "/");
                        mydb.InsertImageTraining(newid, dbPathPlain, dbPath, true);
                        label6.ForeColor = Color.ForestGreen;
                        label6.Text = "Success";
                        //File.Delete(tempPath);
                        
                        imageBox7.Image = cropimage;
                    }
                    else
                    {
                        label6.ForeColor = Color.Red;
                        label6.Text = "Fail";
                    }
                }
                //eigenRecog.reloadData();
            }
        }
Developer: pathom2000, Project: Face-Rcognition-with-Augmented-Reality, Lines of code: 65, Source: FormTrain.cs

Example 11: TrainFrame

        private void TrainFrame(object sender, EventArgs e)
        {
            
                try
                {
                    imageFrameT = captureT.QueryFrame();

                    Image<Gray, byte> darkimage = new Image<Gray, byte>(ROIwidth, ROIheight);
                    Image<Gray, byte> cropimage = new Image<Gray, byte>(ROIwidth, ROIheight);
                    Image<Gray, byte> plainimage = new Image<Gray, byte>(ROIwidth, ROIheight);
                    //ArrayList pic = new ArrayList();
                    if (imageFrameT != null)
                    {
                        Image<Gray, byte> greyImage = imageFrameT.Copy().Convert<Gray,byte>();


                        var faces = face.Detect(greyImage, 1.3, 6, HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT, new Size(120, 120), new Size(300, 300));
                        if (faces.Length > 0)
                        {
                            label6.ForeColor = Color.Chocolate;
                            label6.Text = "Tracking Face";
                            foreach (var facecount in faces)
                            {
                                facePosition = new Point(facecount.rect.X, facecount.rect.Y);
                                faceRectangleSize = new Size(facecount.rect.Width, facecount.rect.Height);
                                faceRectangle = new Rectangle(facePosition, faceRectangleSize);
                                greyImage.ROI = faceRectangle;
                                var eyeObjects = eyeWithGlass.DetectMultiScale(greyImage, 1.3, 6, minEye, maxEye);
                                greyImage.ROI = Rectangle.Empty;
                                if (eyeObjects.Length == 2)
                                {
                                    Console.WriteLine("eye");
                                    if (eyeObjects[0].X > eyeObjects[1].X)
                                    {
                                        var temp = eyeObjects[0];
                                        eyeObjects[0] = eyeObjects[1];
                                        eyeObjects[1] = temp;
                                    }
                                    int betweeneLength = eyeObjects[1].X - eyeObjects[0].X;
                                    int lefteyebrowpoint = eyeObjects[0].X;//
                                    int righteyebrowpoint = eyeObjects[1].X + eyeObjects[1].Width;//
                                    int margin = (int)((1.5 / 8.0) * betweeneLength);
                                    int neareyebrowpoint = (int)(0.2 * betweeneLength);
                                    int faceheight = (int)(2.3 * betweeneLength);

                                    realFacePosition = new Point(facePosition.X + lefteyebrowpoint - margin, facePosition.Y + eyeObjects[0].Y - neareyebrowpoint);
                                    realfaceRectangleSize = new Size((righteyebrowpoint + margin) - (lefteyebrowpoint - margin), faceheight);
                                    realfaceRectangle = new Rectangle(realFacePosition, realfaceRectangleSize);

                                    greyImage.ROI = realfaceRectangle;

                                    imageFrameT.Draw(realfaceRectangle, new Bgr(Color.LimeGreen), 2);

                                    if (trainflag)
                                    {
                                        //get bigger face in frame
                                        cropimage = greyImage.Resize(ROIwidth, ROIheight, INTER.CV_INTER_LINEAR);

                                        if (!cropimage.Equals(darkimage))
                                        {
                                            cropimage._EqualizeHist();

                                            CvInvoke.cvFastNlMeansDenoising(cropimage, cropimage, 3, 7, 21);
                                            plainimage = cropimage.Copy();
                                            Point[] pL = new Point[3];
                                            Point[] pR = new Point[3];
                                            int y0 = 105;
                                            int y1 = 174;
                                            int x0 = 0;
                                            int x1 = 34;
                                            int x2 = 105;
                                            int x3 = 139;
                                            pL[0] = new Point(x0, y0);
                                            pL[1] = new Point(x0, y1);
                                            pL[2] = new Point(x1, y1);
                                            pR[0] = new Point(x3, y0);
                                            pR[1] = new Point(x3, y1);
                                            pR[2] = new Point(x2, y1);
                                            cropimage.FillConvexPoly(pL, new Gray(128));
                                            cropimage.FillConvexPoly(pR, new Gray(128));
                                            //cropimage = cropimage.SmoothMedian(3);
                                            imageBox7.Image = cropimage;     //line 2
                                            cropimage.Save(folderPath + tempPath);
                                            string dbPath = (folderPath + tempPath).Replace("\\", "/");
                                            plainimage.Save(folderPath + tempPath2);
                                            string dbPathPlain = (folderPath + tempPath2).Replace("\\", "/");
                                            mydb.InsertImageTraining(newid, dbPathPlain, dbPath, true);
                                            label6.ForeColor = Color.ForestGreen;
                                            label6.Text = "Success";
                                            //File.Delete(tempPath);
                                            //eigenRecog.reloadData();
                                            imageBox7.Image = cropimage;
                                            imageCount++;
                                        }
                                        else
                                        {
                                            label6.ForeColor = Color.Red;
                                            label6.Text = "Fail";
                                        }
                                        if (imageCount > 25)
//......... part of the code omitted here .........
Developer: pathom2000, Project: Face-Rcognition-with-Augmented-Reality, Lines of code: 101, Source: FormTrain.cs

Example 12: ProcessDevice

        private void ProcessDevice(Device device, Image<Rgb, byte> colorImage, Image<Gray, byte> grayscaleImage, int width, int height, ref Image<Rgb, byte> debugImage)
        {
            var deviceRoi = CalculateRoiFromNormalizedBounds(device.Area, colorImage);
            deviceRoi = deviceRoi.GetInflatedBy(RoiExpandFactor, colorImage.ROI);

            var imageRoi = colorImage.ROI;
            colorImage.ROI = deviceRoi;
            List<Point[]> quadrilaterals;
            var markers = GetMarkers(ref colorImage, deviceRoi, width, height, ref debugImage, out quadrilaterals);
            colorImage.ROI = imageRoi;

            var grayscaleImageRoi = grayscaleImage.ROI;
            grayscaleImage.ROI = deviceRoi;

            var i = 0;
            foreach (var marker in markers)
            {
                grayscaleImage.FillConvexPoly(quadrilaterals[i], Grays.White);

                var display = FindDisplayInImage(ref grayscaleImage, deviceRoi, width, height, marker, ref debugImage);

                if (display != null)
                {
                    if (IsRenderContent && IsFindDisplayContiuously)
                    {
                        var debugImageRoi = debugImage.ROI;
                        debugImage.ROI = deviceRoi;

                        var enclosingRectangle = display.EnclosingRectangle;
                        DrawEdge(ref debugImage, enclosingRectangle.LongEdge, Rgbs.Red);
                        DrawEdge(ref debugImage, enclosingRectangle.ShortEdge, Rgbs.Green);

                        debugImage.ROI = debugImageRoi;
                    }

                    DisplaySample displaySample;
                    if (_blobFoundInRgbImage.ContainsKey(device.BlobId))
                    {
                        displaySample = _blobFoundInRgbImage[device.BlobId];
                    }
                    else
                    {
                        displaySample = new DisplaySample();
                        _blobFoundInRgbImage.Add(device.BlobId, displaySample);
                    }

                    if (displaySample.NeedsSample())
                    {
                        displaySample.Sample(display.EnclosingRectangle);
                    }
                    else
                    {
                        _blobFoundInRgbImage.Remove(device.BlobId);
                        display.EnclosingRectangle = displaySample.GetBestSample();

                        Stage(display);
                    }

                    //Stage(display);
                }

                i++;
            }

            grayscaleImage.ROI = grayscaleImageRoi;
        }
Developer: AlternateIf, Project: huddle-engine, Lines of code: 66, Source: FindDisplay.cs

Example 13: FindCirclesAndLinesWithHough

        public static Bitmap FindCirclesAndLinesWithHough(Image<Bgr, byte> source)
        {
            frameBgr = source;

            frameGray = frameBgr[0]
                .PyrDown()
                .Dilate(2)
                .Erode(2)
                .ThresholdBinary(VideoParameters.Default.GrayFrameThresholdLow, VideoParameters.Default.GrayFrameThresholdHigh)
                .PyrUp();

            frameCanny = HoughTransform.GetCanny(frameGray);

            var blackGray = new Gray(0);
            var blackBgr = new Bgr(Color.Black);
            frameCanny.SetValue(blackGray, RobotMask);

            var lines = HoughTransform.GetLines(frameCanny);
            Lines = lines;

            var height = VisionData.FrameSize.Height;
            foreach (var line in lines)
            {
                if (line.Length < 10 /*&& IsLineWhite(frameBgr, line)*/)
                    continue;

                var polygon = new[]
                {
                    new Point(line.P1.X, line.P1.Y + 5),
                    new Point(line.P1.X, 0),
                    new Point(line.P2.X, 0),
                    new Point(line.P2.X, line.P2.Y + 5)
                };
                /*
                var newLine = GetEdgeLine(line.P1, line.P2);
                var polygon = new[]
                {
                    new Point(newLine.P1.X, newLine.P1.Y),
                    new Point(newLine.P1.X, newLine.P1.Y - height),
                    new Point(newLine.P2.X, newLine.P2.Y - height),
                    new Point(newLine.P2.X, newLine.P2.Y)
                };
                 */

                frameCanny.FillConvexPoly(polygon, blackGray);
                frameBgr.FillConvexPoly(polygon, blackBgr);
                //frameCanny.Draw(line, new Gray(0), 5);
            }

            //Lines = HoughTransform.FilterLines(lines);
            Circles = HoughTransform.GetCircles(frameCanny.Bitmap);

            //var points = EdgeFinder.GetTopArea(blue, lines);
            //canny.FillConvexPoly(points.ToArray(), new Gray(155));

            //var contours = EdgeFinder.GetContours(canny);
            //foreach (var contour in contours)
            //	canny.FillConvexPoly(contour.ToArray(), new Gray(150));

            // HACK: Testing))
            switch (channel)
            {
                default:
                case 1:
                    return frameBgr.Bitmap;
                case 2:
                    return frameGray.Convert<Bgr, byte>().Bitmap;
                case 3:
                    return frameCanny.Convert<Bgr, byte>().Bitmap;
                case 4:
                    return new Image<Bgr, byte>(HoughTransform.CircleTransformation.ToBitmap()).Bitmap;
                case 5:
                    return frameBgr.CopyBlank().Bitmap;
                case 6:
                    var frame = frameBgr.InRange(
                        new Bgr(0, 0, 50),
                        new Bgr(VideoParameters.Default.RedThresholdBlue, VideoParameters.Default.RedThresholdGreen, VideoParameters.Default.RedThresholdRed));
                    return frame
                        .Dilate(3).Erode(6).Dilate(3)
                        .Convert<Bgr, byte>().Bitmap;
                case 7:
                    var frame2 = frameBgr.InRange(
                        new Bgr(50, 0, 0),
                        new Bgr(VideoParameters.Default.BlueThresholdBlue, VideoParameters.Default.BlueThresholdGreen, VideoParameters.Default.BlueThresholdRed));
                    return frame2
                        .Dilate(3).Erode(6).Dilate(3)
                        .Convert<Bgr, byte>().Bitmap;
                case 8:
                    var rectanglesRed = FindRedGoalRectangles();
                    var i = 1;
                    foreach (var rectangle in rectanglesRed.OrderBy(x => x.Size.Width))
                    {
                        frameBgr.Draw(rectangle, new Bgr(Color.Red), 3);
                        frameBgr.Draw(i.ToString(), ref Font, rectangle.Location + new Size(10, 10), new Bgr(Color.DarkRed));
                    }

                    var rectanglesBlue = FindBlueGoalRectangles();
                    i = 1;
                    foreach (var rectangle in rectanglesBlue.OrderBy(x => x.Size.Width))
                    {
//......... part of the code omitted here .........
Developer: martikaljuve, Project: Robin, Lines of code: 101, Source: VisionExperiments.cs

Example 14: GetDepthOfRGBPixels

        public static Image<Gray, float> GetDepthOfRGBPixels(Image<Gray, float> depth, Image<Rgb, byte> rgb, Image<Rgb, float> uvmap)
        {
            // create RGB-sized image
            var retdepth = new Image<Gray, float>(rgb.Width, rgb.Height, new Gray(EmguExtensions.LowConfidence));
            var retdepthWidth = retdepth.Width;
            var retdepthHeight = retdepth.Height;

            var uvmapWidth = uvmap.Width;
            var uvmapHeight = uvmap.Height;

            var depthData = depth.Data;
            var uvmapData = uvmap.Data;

            float xfactor = 1.0f / 255.0f * retdepthWidth;
            float yfactor = 1.0f / 255.0f * retdepthHeight;

            //for (int uvy = 0; uvy < uvmapHeight - 1; uvy++)
            Parallel.For(0, uvmapHeight - 1, uvy =>
            {

                //for (int uvx = 0; uvx < uvmapWidth - 1; uvx++)
                Parallel.For(0, uvmapWidth - 1, uvx =>
                {
                    // for each point in UVmap create two triangles that connect this point with the right/bottom neighbors

                    var pts1 = new Point[3];
                    var d1 = new float[]
                    {
                        depthData[uvy, uvx, 0],
                        depthData[uvy, uvx + 1, 0],
                        depthData[uvy + 1, uvx, 0]
                    };

                    double d1avg = 0;
                    int count = 0;
                    for (int i = 0; i < d1.Length; i++)
                    {
                        if (d1[i] != EmguExtensions.Saturation && d1[i] != EmguExtensions.LowConfidence)
                        {
                            d1avg += d1[i];
                            count++;
                        }
                    }
                    if (count > 0)
                        d1avg = d1avg / (float)count;
                    else
                        d1avg = EmguExtensions.LowConfidence;

                    var pts2 = new Point[3];
                    var d2 = new float[]
                    {
                        depthData[uvy, uvx + 1, 0],
                        depthData[uvy + 1, uvx + 1, 0],
                        depthData[uvy + 1, uvx, 0]
                    };

                    double d2avg = 0;
                    count = 0;
                    for (int i = 0; i < d2.Length; i++)
                    {
                        if (d2[i] != EmguExtensions.Saturation && d2[i] != EmguExtensions.LowConfidence)
                        {
                            d2avg += d2[i];
                            count++;
                        }
                    }
                    if (count > 0)
                        d2avg = d2avg / (float)count;
                    else
                        d2avg = EmguExtensions.LowConfidence;

                    bool outofbounds = false;

                    // get points for triangle 1 (top left)
                    pts1[0].X = (int)(uvmapData[uvy, uvx, 0] * xfactor + 0.5);
                    outofbounds |= pts1[0].X < 0 || pts1[0].X > retdepthWidth;

                    pts1[0].Y = (int)(uvmapData[uvy, uvx, 1] * yfactor + 0.5);
                    outofbounds |= pts1[0].Y < 0 || pts1[0].Y > retdepthHeight;

                    pts1[1].X = (int)(uvmapData[uvy, uvx + 1, 0] * xfactor + 0.5) - 1;
                    outofbounds |= pts1[1].X < 0 || pts1[1].X > retdepthWidth;

                    pts1[1].Y = (int)(uvmapData[uvy, uvx + 1, 1] * yfactor + 0.5) - 1;
                    outofbounds |= pts1[1].Y < 0 || pts1[1].Y > retdepthHeight;

                    pts1[2].X = (int)(uvmapData[uvy + 1, uvx, 0] * xfactor + 0.5);
                    outofbounds |= pts1[2].X < 0 || pts1[2].X > retdepthWidth;

                    pts1[2].Y = (int)(uvmapData[uvy + 1, uvx, 1] * yfactor + 0.5) - 1;
                    outofbounds |= pts1[2].Y < 0 || pts1[2].Y > retdepthHeight;

                    if (!outofbounds)
                        retdepth.FillConvexPoly(pts1, new Gray(d1avg));

                    // get points for triangle 2 (bottom right)
                    outofbounds = false;

                    pts2[0].X = pts1[1].X;
                    outofbounds |= pts2[0].X < 0 || pts2[0].X > retdepthWidth;
//......... part of the code omitted here .........
Developer: AlternateIf, Project: huddle-engine, Lines of code: 101, Source: Senz3DUtils.cs

Example 15: ToImage

        /// <summary>
        /// Converts this histogram to an image
        /// </summary>
        public Image<Gray, Byte> ToImage()
        {
            int binWidth = 10, binHeight = 18;
            Image<Gray, Byte> img = new Image<Gray, byte>(RadiusBin * binWidth, ThetaBin * binHeight);

            double minCount = int.MaxValue, maxCount = int.MinValue;
            for (int i = 0; i < ThetaBin; ++i) {
                for (int j = 0; j < RadiusBin; ++j) {
                    int count = Histogram[i, j];
                    if (count < minCount) minCount = count;
                    if (count > maxCount) maxCount = count;
                }
            }

            for (int i = 0; i < ThetaBin; ++i) {
                for (int j = 0; j < RadiusBin; ++j) {
                    double color = 255 - (Histogram[ThetaBin - i - 1, j] - minCount) / (maxCount - minCount) * 255;
                    img.FillConvexPoly(new[]{
                        new Point((j+0)*binWidth, (i+0)*binHeight),
                        new Point((j+1)*binWidth, (i+0)*binHeight),
                        new Point((j+1)*binWidth, (i+1)*binHeight),
                        new Point((j+0)*binWidth, (i+1)*binHeight)
                    }, new Gray(color));
                }
            }

            return img;
        }
Developer: pakerliu, Project: sharp-context, Lines of code: 31, Source: ShapeContext.cs


Note: The Image.FillConvexPoly method examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or reuse of the code should follow the license of the corresponding project. Please do not reproduce without permission.