当前位置: 首页>>代码示例>>C#>>正文


C# Image.GetAverage方法代码示例

本文整理汇总了C#中Image.GetAverage方法的典型用法代码示例。如果您正苦于以下问题:C# Image.GetAverage方法的具体用法?C# Image.GetAverage怎么用?C# Image.GetAverage使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在Image的用法示例。


在下文中一共展示了Image.GetAverage方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。

示例1: getThreshold

 //Two-pass binarization: the second pass thresholds at the mean of the pixels
 //kept by the first pass, which suppresses isolated noise points.
 private Image<Gray, float> getThreshold(Image<Gray,float> toBeThreshold)
 {
     // Pass 1: threshold at the global mean intensity.
     Gray globalMean = toBeThreshold.GetAverage(null);
     Image<Gray, float> firstPassMask = toBeThreshold.ThresholdBinary(globalMean, new Gray(255));

     // Pass 2: recompute the mean only over pixels selected by pass 1,
     // then threshold the original image at that refined value.
     Gray maskedMean = toBeThreshold.GetAverage(firstPassMask.Convert<Gray,byte>());
     return toBeThreshold.ThresholdBinary(maskedMean, new Gray(255));
 }
开发者ID:Charlesjean,项目名称:HandGestureRecog,代码行数:8,代码来源:Form1.cs

示例2: btnAvgHS_Click

        /// <summary>
        /// Converts the current ROI image to HSV and shows its average hue and
        /// saturation in the corresponding text boxes.
        /// </summary>
        private void btnAvgHS_Click(object sender, EventArgs e)
        {
            Image<Hsv, byte> HsvROI = new Image<Hsv, byte>(roiImg.Width, roiImg.Height);
            CvInvoke.cvCvtColor(roiImg,HsvROI,COLOR_CONVERSION.CV_BGR2HSV);

            // Compute the average once instead of scanning the whole image twice.
            Hsv avg = HsvROI.GetAverage();
            textBoxHue.Text = avg.Hue.ToString();
            // "Satuation" is Emgu CV's (misspelled) property name for saturation.
            textBoxSat.Text = avg.Satuation.ToString();
        }
开发者ID:cervecero84,项目名称:tracking-people,代码行数:8,代码来源:ColorSampleForm.cs

示例3: AdjWP

        /// <summary>
        /// Computes per-channel white-point adjustment factors for a region of
        /// interest: the ROI is converted to CIE XYZ and each reference
        /// white-point channel is divided by the ROI's average channel value.
        /// </summary>
        /// <param name="roiImg">BGR region of interest to sample.</param>
        /// <param name="WP">Reference white point in XYZ.</param>
        /// <param name="AdjXYZ">Output array (length >= 3) receiving the X, Y, Z adjustment factors.</param>
        public void AdjWP(Image<Bgr, byte> roiImg, Xyz WP, double[] AdjXYZ)
        {
            //Image<Bgr, byte> roiImg = (Image<Bgr, byte>)roi;
            Image<Xyz, byte> XYZROI = new Image<Xyz, byte>(roiImg.Width, roiImg.Height);
            CvInvoke.cvCvtColor(roiImg, XYZROI, COLOR_CONVERSION.CV_BGR2XYZ);

            // Scan the image once instead of three times (each GetAverage call
            // visits every pixel).
            Xyz avg = XYZROI.GetAverage();

            // Abs guards against a sign flip should an average channel ever be negative.
            AdjXYZ[0] = Math.Abs(WP.X / avg.X);
            AdjXYZ[1] = Math.Abs(WP.Y / avg.Y);
            AdjXYZ[2] = Math.Abs(WP.Z / avg.Z);
        }
开发者ID:cervecero84,项目名称:tracking-people,代码行数:17,代码来源:ColorSampleForm.cs

示例4: IsAreaColorMousable

 /// <summary>
 /// Heuristically decides whether the connected component's region in
 /// <paramref name="source"/> has an average colour consistent with a
 /// "mousable" target, using HLS thresholds over a cleaned-up mask.
 /// Also appends a colour swatch and the raw values to an HTML debug log.
 /// </summary>
 /// <param name="comp">Connected component whose bounding rectangle is examined.</param>
 /// <param name="source">BGR source image; its ROI is temporarily narrowed, then restored.</param>
 /// <param name="sourcemask">Mask selecting which pixels contribute to the averages.</param>
 /// <returns>True when the average colour passes the lightness/hue/saturation thresholds.</returns>
 public static bool IsAreaColorMousable(MCvConnectedComp comp, Image<Bgr, byte> source, Image<Gray, byte> sourcemask)
 {
     var newRect = new Rectangle(comp.rect.Left, comp.rect.Top, comp.rect.Width, comp.rect.Height);
     // Shrink the rectangle by 4px on each side to skip the blob's edge pixels.
     newRect.Inflate(-4, -4);
     // Clean the mask: blur, then dilate + erode (a morphological close) to fill small holes.
     // NOTE(review): mask and the Convert<> result below are IDisposable and never
     // disposed; the trailing .Copy() also looks redundant after Erode — verify.
     var mask = sourcemask.SmoothGaussian(7).Dilate(7).Erode(7).Copy();
     var oldRoi = source.ROI;
     var oldMaskRoi = mask.ROI;
     source.ROI = newRect;
     mask.ROI = newRect;
     // Average colour inside the masked ROI, in both HLS and BGR spaces.
     var hlscolor = source.Convert<Hls, byte>().GetAverage(mask);
     var rgbcolor = source.GetAverage(mask);
     // Append a swatch (hex RGB background) plus the HLS numbers to output.html.
     using (var file = new System.IO.StreamWriter("output.html", true))
     {
         file.Write(
             "<p><div style=\"background-color: #{3:X2}{4:X2}{5:X2}; width: 24px; height: 24px; float: left;\"></div>{0}, {1}, {2}</p>",
             hlscolor.Hue, hlscolor.Lightness, hlscolor.Satuation, (int) rgbcolor.Red, (int) rgbcolor.Green, (int) rgbcolor.Blue);
     }
     //mask.Convert<Bgr, byte>().Copy(source, mask);
     // Restore the original ROIs. NOTE(review): not in a finally block — an
     // exception above would leave source with the narrowed ROI.
     source.ROI = oldRoi;
     mask.ROI = oldMaskRoi;
     //source.Draw(comp.rect, rgbcolor, -1);
     // Empirical thresholds: bright enough, hue below 160, saturation below 80.
     return hlscolor.Lightness > 102 && hlscolor.Hue < 160 && hlscolor.Satuation < 80;
 }
开发者ID:floatdrop,项目名称:information-processing,代码行数:23,代码来源:BlobDetector.cs

示例5: GrayColorSubstraction

 //Simple grayscale background subtraction: binarize the absolute difference
 //between the frame and the background, using the frame's mean intensity as
 //the threshold.
 private Image<Gray, byte> GrayColorSubstraction(Image<Gray,byte> bkImg, Image<Gray,byte> frame)
 {
     Gray meanIntensity = frame.GetAverage(null);
     return frame.AbsDiff(bkImg).ThresholdBinary(meanIntensity, new Gray(255));
 }
开发者ID:Charlesjean,项目名称:HandGestureRecog,代码行数:8,代码来源:Form1.cs

示例6: detectPolysFromContures

        /// <summary>
        /// Detects polygons from contours, calculates each polygon's convex hull
        /// and triangulates it. Side effects: repopulates the avgClrs field with
        /// one average colour per accepted polygon and draws the result into
        /// <paramref name="imageBox"/>.
        /// </summary>
        /// <param name="refImg">Image to analyse.</param>
        /// <param name="maxVertices">Max. vertices allowed in a polygon.</param>
        /// <param name="minArea">Minimal area for a polygon (currently superseded by calcMinMaxArea — see below).</param>
        /// <param name="maxArea">Max. area for a polygon (currently superseded by calcMinMaxArea — see below).</param>
        /// <param name="threshold">Threshold for segmentation (lower Canny threshold when back == false).</param>
        /// <param name="filter">When false, area/perimeter/vertex filtering is bypassed and every hull is kept.</param>
        /// <param name="dilate">Dilation iterations applied to the edge image to stress borders.</param>
        /// <param name="back">When true, derive the lower Canny threshold from the average background colour instead of <paramref name="threshold"/>.</param>
        /// <param name="imageBox">Imagebox (for dumping a result).</param>
        /// <returns>List of triangulated polygons (empty when no image was supplied).</returns>
        public List<PolyFromTris> detectPolysFromContures(Image<Bgr, Byte> refImg, int maxVertices, double minArea, double maxArea, float threshold, bool filter, int dilate, bool back, ref ImageBox imageBox)
        {
            // Lists, for storing data
            List<Point[]> polys = new List<Point[]>(); // Polys list
            List<Point[]> chull = new List<Point[]>(); // CONV HULL
            avgClrs = new List<Bgr>();
            List<PolyFromTris> polysTriang = new List<PolyFromTris>();

            try
            {
                // Lower Canny threshold: either the flat user-supplied value, or —
                // when back is set — the average colour of the dark (inverse-thresholded)
                // background region.
                Bgr backc = new Bgr(threshold, threshold, threshold);
                if (back)
                {
                    // PyrDown/PyrUp smooths the image before building the background mask.
                    Image<Gray, Byte> maskImg = refImg.Convert<Gray, Byte>().PyrDown().PyrUp().ThresholdBinaryInv(new Gray(128.0f), new Gray(255.0f));
                    backc = refImg.GetAverage(maskImg);
                }

               // Image<Bgr, Byte> clrSeg = refImg.ThresholdBinary (back, new Bgr(120.0f, 120.0f, 120.0f));
               // imageBox.Image = clrSeg;
                // Canny edge detection on the smoothed image; upper threshold is fixed at 120.
                Image<Bgr, Byte> cannyEdges = refImg.PyrUp().PyrDown().Canny(backc, new Bgr(120.0f, 120.0f, 120.0f));

                cannyEdges = cannyEdges.Dilate(dilate); // To stress borders, improves contoures detection

       
                // Contour detection & triangulation.
                // NOTE(review): the MemStorage passed inline to FindContours is never
                // disposed — consider hoisting it into the using below.
                using (MemStorage shramba = new MemStorage())
                    for (Contour<Point> konture = cannyEdges.Convert<Gray, Byte>().FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_TC89_L1, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, new MemStorage()); konture != null; konture = konture.HNext)
                    {
                     //   Contour<Point> trenutna = konture.ApproxPoly(konture.Perimeter * 0.005, shramba);
                        // Simplify the contour (tolerance 0.5% of perimeter), then take its convex hull.
                        Seq<Point> convexHull = konture.ApproxPoly(konture.Perimeter * 0.005, shramba).GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);    //konture.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);

                        // Calculate max. and min. area of valid polys (holds).
                        // NOTE(review): recomputed on every loop iteration from refImg only —
                        // loop-invariant, and it shadows the minArea/maxArea parameters.
                        float min = 0.0f, max = 0.0f, perim = 0.0f;
                        calcMinMaxArea(refImg, ref min, ref max, ref perim);


                        //      if (konture.Area > minArea && konture.Area < maxArea && trenutna.Total < maxVertices)
                        
                        // Keep the hull when it passes the size/vertex filter, or when filtering is off.
                        if ( (convexHull.Area > min && convexHull.Area < max && convexHull.Perimeter < perim && convexHull.Total < maxVertices) || !filter)
                        {
                        //    if ((convexHull.Area / convexHull.Perimeter) >5.5)
                        //    {
                                Point[] conHull = convexHull.ToArray(); //Array.ConvertAll(convexHull.ToArray(), new Converter<Point, PointF>(PointToPointF));

                                chull.Add(conHull);
                                polysTriang.Add(  triangulator.triangulatePoly(convexHull, 4, refImg.Width, refImg.Height));
                                avgClrs.Add(calcPolyBgrAvrege(refImg, convexHull.BoundingRectangle));

                                // Draw detected holds and triangulation
                                imageBox.Image = drawPolys(refImg, chull, Color.DarkRed);
                                triangulator.drawTris(refImg, polysTriang[polysTriang.Count - 1].tris, ref imageBox);
                        //    }
                            
                        }
                    }
            }
            // NOTE(review): catching NullReferenceException to detect a missing input
            // image is an anti-pattern — an explicit refImg == null check up front
            // would be safer and would not mask unrelated null bugs.
            catch (NullReferenceException)
            {
                MessageBox.Show("Error, please, input an image.");
            }

            // Return triangulated polys
            return polysTriang;
        }
开发者ID:aljosaosep,项目名称:holdrecognition,代码行数:75,代码来源:WallRecognition.cs

示例7: GetBackproject

    /// <summary>
    /// Builds the histogram back-projection of <paramref name="hue"/>, masks it
    /// with <paramref name="mask"/>, and blanks out the <paramref name="hide"/>
    /// region (extended 50px downwards). When the th_check flag is set, it also
    /// samples the average back-projection intensity inside face_rect and clears
    /// isTracked when it falls below half of backproj_threshold.
    /// </summary>
    /// <param name="hue">Single-channel hue image.</param>
    /// <param name="_hist">Histogram to back-project.</param>
    /// <param name="mask">Pixels outside this mask are zeroed in the result.</param>
    /// <param name="hide">Region to blank in the result. Rectangle is a struct,
    /// so enlarging it here does not affect the caller's copy.</param>
    /// <returns>The masked back-projection image.</returns>
    private Image<Gray, Byte> GetBackproject(Image<Gray, Byte> hue, DenseHistogram _hist,Image<Gray,Byte> mask,Rectangle hide)
    {
        Image<Gray, Byte> backproject = new Image<Gray, byte>(hue.Width, hue.Height);
        var imgs = new IntPtr[1] { hue };
        Emgu.CV.CvInvoke.cvCalcBackProject(imgs, backproject, _hist);
        // AND with the mask so only masked pixels survive.
        Emgu.CV.CvInvoke.cvAnd(backproject, mask, backproject, IntPtr.Zero);

        // One-shot tracking check requested by the caller via the th_check field.
        if (th_check)
        {
            backproject.ROI = face_rect;
            // Low average response in the face region means the target was lost.
            if (backproject.GetAverage().Intensity < backproj_threshold/2)
            {
                isTracked = false;
            }
            th_check = false;
            Emgu.CV.CvInvoke.cvResetImageROI(backproject);
        }

        // Zero out the hide region, grown 50px downwards.
        hide.Height += 50;
        Emgu.CV.CvInvoke.cvSetImageROI(backproject, hide);
        try
        {
            Emgu.CV.CvInvoke.cvZero(backproject);
        }
        // NOTE(review): deliberate best-effort — cvZero can throw when the grown
        // hide rect falls outside the image; the failure is intentionally ignored.
        catch { }
        Emgu.CV.CvInvoke.cvResetImageROI(backproject);

        return backproject;
    }
开发者ID:tuxoko,项目名称:camfight,代码行数:29,代码来源:FrameProcessor.cs

示例8: Calculate

        /// <summary>
        /// Classifies the polygon area of an image as snow-covered or not by
        /// comparing its masked average colour against the polygon's stored
        /// snow/normal reference colours.
        /// </summary>
        /// <param name="imageFilePath">Path of the image to classify.</param>
        /// <param name="polygon">Polygon carrying the serialized reference colours (BgrSnow / BgrNormal).</param>
        /// <param name="pointCollection">Polygon outline used to build the mask.</param>
        /// <returns>1 when closer to the snow reference, -1 when closer to the
        /// normal reference, 0 when the two distances differ by less than 10.</returns>
        public short Calculate(string imageFilePath, Polygon polygon, Media.PointCollection pointCollection)
        {
            // Maskiertes Bild laden
            // Drawing.Bitmap maskedBitmap = GetMaskedBitmap(imageFilePath, pointCollection);

            // Image, Mat and the mask wrap unmanaged OpenCV buffers — dispose them
            // deterministically instead of leaking until finalization.
            using (Image<Bgr, byte> cvImage = new Image<Bgr, byte>(imageFilePath))
            using (Mat matMask = new Mat(new Drawing.Size(cvImage.Cols, cvImage.Rows), DepthType.Cv8U, 3))
            {
                // Scale the polygon to the image size and generate its point list.
                List<Point> scaledPoints = GetScaledPoints(pointCollection, cvImage.Rows, cvImage.Cols);
                List<Drawing.Point> scaledDrawingPoints = GetPolygonPoints(scaledPoints, cvImage.Rows, cvImage.Cols);
                // Draw the polygon in white onto the mask.
                using (VectorOfPoint vPoint = new VectorOfPoint(scaledDrawingPoints.ToArray()))
                using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint))
                {
                    CvInvoke.FillPoly(matMask, vvPoint, new Bgr(255, 255, 255).MCvScalar);
                }

                using (Image<Gray, byte> imageMask = new Image<Gray, byte>(matMask.Bitmap))
                {
                    // Average colour over the masked area.
                    Bgr result = cvImage.GetAverage(imageMask);
                    // Compare against the stored reference colours.
                    Bgr snow = JsonConvert.DeserializeObject<Bgr>(polygon.BgrSnow);
                    Bgr normal = JsonConvert.DeserializeObject<Bgr>(polygon.BgrNormal);

                    // Manhattan distance in BGR space to each reference.
                    double resultSnow = Math.Abs(snow.Blue - result.Blue) + Math.Abs(snow.Green - result.Green) + Math.Abs(snow.Red - result.Red);
                    double resultNormal = Math.Abs(normal.Blue - result.Blue) + Math.Abs(normal.Green - result.Green) + Math.Abs(normal.Red - result.Red);

                    // Too close to call → 0; otherwise pick the nearer reference.
                    if (Math.Abs(resultSnow - resultNormal) < 10)
                    {
                        return 0;
                    }
                    else if (resultSnow < resultNormal)
                    {
                        return 1;
                    }
                    else
                    {
                        return -1;
                    }
                }
            }
        }
开发者ID:uzapy,项目名称:ch.bfh.bti7302.w2015.schneedetektion,代码行数:42,代码来源:OpenCVHelper.cs

示例9: CalculateAverageBrightessForArea

        /// <summary>
        /// For every polygon of the camera that captured <paramref name="reference0"/>,
        /// computes the average BGR colour of the masked polygon area in both
        /// reference images and persists the results on the polygon
        /// (BgrSnow from reference0, BgrNormal from reference1).
        /// </summary>
        /// <param name="reference0">Path to the "snow" reference image.</param>
        /// <param name="reference1">Path to the "normal" reference image.</param>
        /// <param name="dataContext">Data context used to load images/polygons and save results.</param>
        public void CalculateAverageBrightessForArea(string reference0, string reference1, StrassenbilderMetaDataContext dataContext)
        {
            // Load image meta data by file name (without extension).
            string name0 = Path.GetFileNameWithoutExtension(reference0);
            string name1 = Path.GetFileNameWithoutExtension(reference1);
            Image image0 = dataContext.Images.Where(i => i.Name == name0).FirstOrDefault();
            Image image1 = dataContext.Images.Where(i => i.Name == name1).FirstOrDefault();

            // Load all polygons belonging to the camera that produced image0.
            IEnumerable<Polygon> polygons = dataContext.Polygons.Where(p => p.CameraName == image0.Place);

            // Apply each polygon mask in turn.
            foreach (var polygon in polygons)
            {
                IList<Point> polygonPoints = JsonConvert.DeserializeObject<Media.PointCollection>(polygon.PolygonPointCollection);

                // Dispose the bitmaps/images/mask per iteration so unmanaged memory
                // does not accumulate across a potentially large polygon set.
                using (Drawing.Bitmap bitmap0 = GetMaskedBitmap(reference0, polygonPoints))
                using (Drawing.Bitmap bitmap1 = GetMaskedBitmap(reference1, polygonPoints))
                using (Image<Bgr, byte> cvImage0 = new Image<Bgr, byte>(bitmap0))
                using (Image<Bgr, byte> cvImage1 = new Image<Bgr, byte>(bitmap1))
                using (Mat matMask = new Mat(new Drawing.Size(cvImage0.Cols, cvImage0.Rows), DepthType.Cv8U, 3))
                {
                    // Scale the polygon to the image size and draw it in white on the mask.
                    List<Point> scaledPoints = GetScaledPoints(polygonPoints, cvImage0.Rows, cvImage0.Cols);
                    List<Drawing.Point> scaledDrawingPoints = GetPolygonPoints(scaledPoints, cvImage0.Rows, cvImage0.Cols);
                    using (VectorOfPoint vPoint = new VectorOfPoint(scaledDrawingPoints.ToArray()))
                    using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint))
                    {
                        CvInvoke.FillPoly(matMask, vvPoint, new Bgr(255, 255, 255).MCvScalar);
                    }

                    using (Image<Gray, byte> imageMask = new Image<Gray, byte>(matMask.Bitmap))
                    {
                        // Average colour over the masked area of each reference image.
                        Bgr result0 = cvImage0.GetAverage(imageMask);
                        Bgr result1 = cvImage1.GetAverage(imageMask);
                        // Persist the averages on the polygon.
                        polygon.BgrSnow = JsonConvert.SerializeObject(result0);
                        polygon.BgrNormal = JsonConvert.SerializeObject(result1);
                        dataContext.SubmitChanges();
                    }
                }
            }
        }
开发者ID:uzapy,项目名称:ch.bfh.bti7302.w2015.schneedetektion,代码行数:45,代码来源:OpenCVHelper.cs


注:本文中的Image.GetAverage方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。