

C# Image.InRange Method Code Examples

This article collects and summarizes typical usage examples of the Image.InRange method in C#. If you are wondering how Image.InRange is used in C#, how to call it, or what real-world examples look like, the hand-picked code samples below may help. You can also explore further usage examples of the Image class to which this method belongs.


A total of 15 code examples of the Image.InRange method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
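Before diving into the examples, a minimal, self-contained sketch of the call itself may help. It assumes the Emgu CV 2.x API used throughout this page; the class name InRangeDemo and the file names sample.png and mask.png are placeholders rather than anything taken from the projects below. InRange takes a lower and an upper color bound and returns a binary Image<Gray, byte> mask that is 255 where every channel of a pixel lies inside the bounds and 0 elsewhere; the same overload pattern exists for other color types such as Image<Hsv, byte>, as several HSV-based examples below show.

using System;
using Emgu.CV;
using Emgu.CV.Structure;

class InRangeDemo
{
    static void Main()
    {
        // Load a BGR image from disk (placeholder file name).
        Image<Bgr, byte> img = new Image<Bgr, byte>("sample.png");

        // A pixel becomes 255 in the mask only when its B, G and R values all fall
        // inside the inclusive [lower, upper] bounds; otherwise it becomes 0.
        Image<Gray, byte> mask = img.InRange(new Bgr(0, 0, 100), new Bgr(80, 80, 255));

        // Save the binary mask (placeholder file name); it can also be fed to
        // FindContours, HoughCircles, etc., as the examples below do.
        mask.Save("mask.png");
    }
}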

Example 1: CalculateHistogram

        public void CalculateHistogram(Image<Gray, byte> source)
        {
            histogram = new DenseHistogram(16, new RangeF(0, 180));

            mask = source.InRange(VideoParameters.Default.CamshiftMaskLow, VideoParameters.Default.CamshiftMaskHigh);
            CvInvoke.cvCalcHist(new[] { source.Ptr }, histogram.Ptr, false, mask.Ptr);

            SetTrackWindow(source.ROI);
        }
Developer ID: martikaljuve, Project: Robin, Lines of code: 9, Source file: Camshift.cs

Example 2: ProcessColorImage

        public override Image ProcessColorImage(Bitmap frame, ToteDetectionType detectionType, bool detectBins)
        {
            Image<Bgr, Byte> img = new Image<Bgr, Byte>(frame);

            //// Get The Thresh Image With Given Values
            //Image<Gray, byte> thresh = (threshData as BgrThreshData).InRange(img);
            //// Pixelate Image
            //threshData.Blur(ref thresh);
            //
            //
            //Image ret = base.AnalyzeImage(thresh, detectionType, detectBins);


            //frame.Dispose();
            //thresh.Dispose();

            img = img.SmoothMedian(11);
            img = img.SmoothGaussian(11);
            img = img.Erode(15);
            img = img.Dilate(10);

            // Try this: img.HoughCircles();

            Image<Gray, byte> thresh = img.InRange(new Bgr(110, 130, 100), new Bgr(164, 166, 181));
            Contour<Point> countor = thresh.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
            List<Contour<Point>> PlayingBalls = new List<Contour<Point>>();
            while (countor != null)
            {
                // filter countors
                // convex hull countors
                if (countor.Area > 50)
                    PlayingBalls.Add(countor);
                   
                countor = countor.HNext;
            }
            float resolutionOffset = ((float)thresh.Width * thresh.Height) / (640.0f * 480.0f);


            foreach (Contour<Point> ball in PlayingBalls)
            {
                
                img.Draw(ball, new Bgr(255, 0, 0), (int)Math.Ceiling(3.0f));
                // draw left countors and their min enclosing circle (draw on img)
            }
          



            Image ret = img.ToBitmap();
            img.Dispose();
            return ret;
        }
Developer ID: GreenBlitz4590Programmers, Project: StrongHoldVision, Lines of code: 52, Source file: ImageProcessor.cs

Example 3: Track

        public void Track(Image<Gray, byte> source)
        {
            if (histogram == null)
                return;

            mask = source.InRange(VideoParameters.Default.CamshiftMaskLow, VideoParameters.Default.CamshiftMaskHigh);
            backProjection = new Image<Gray, byte>(source.Size);

            CvInvoke.cvCalcBackProject(new[] {source.Ptr}, backProjection.Ptr, histogram.Ptr);
            backProjection._And(mask);

            MCvBox2D trackBox;
            CvInvoke.cvCamShift(backProjection.Ptr, trackWindow, new MCvTermCriteria(10, 1), out trackComp, out trackBox);

            trackWindow = trackComp.rect;
            trackCenter = trackBox.center;
        }
Developer ID: martikaljuve, Project: Robin, Lines of code: 17, Source file: Camshift.cs

Example 4: processFrameAndUpdateGUI

        ///////////////////////////////////////////////////////////////////////////////////////////
        void processFrameAndUpdateGUI(object sender, EventArgs arg)
        {
            imgOriginal = capWebcam.QueryFrame();               // get next frame from the webcam

            if (imgOriginal == null)                            // if we did not get a frame
            {                                                   // show error via message box
                MessageBox.Show("unable to read from webcam" + Environment.NewLine + Environment.NewLine +
                                "exiting program");
                Environment.Exit(0);                            // and exit program
            }

            imgBlurredBGR = imgOriginal.SmoothGaussian(5);          // blur

            imgProcessed = imgBlurredBGR.InRange(new Bgr(0, 0, 175), new Bgr(100, 100, 256));       // filter on color

            imgProcessed = imgProcessed.SmoothGaussian(5);          // blur again

            StructuringElementEx structuringElementEx = new StructuringElementEx(5, 5, 1, 1, CV_ELEMENT_SHAPE.CV_SHAPE_RECT);       // declare structuring element to use in dilate and erode

            CvInvoke.cvDilate(imgProcessed, imgProcessed, structuringElementEx, 1);             // close image (dilate, then erode)
            CvInvoke.cvErode(imgProcessed, imgProcessed, structuringElementEx, 1);              // closing "closes" (i.e. fills in) foreground gaps

            CircleF[] circles = imgProcessed.HoughCircles(new Gray(100), new Gray(50), 2, imgProcessed.Height / 4, 10, 400)[0];     // fill variable circles with all circles in the processed image

            foreach (CircleF circle in circles)                     // for each circle
            {
                if (txtXYRadius.Text != "") txtXYRadius.AppendText(Environment.NewLine);        // if we are not on the first line in the text box then insert a new line char

                txtXYRadius.AppendText("ball position = x " + circle.Center.X.ToString().PadLeft(4) +           // print ball position and radius
                                       ", y = " + circle.Center.Y.ToString().PadLeft(4) +                       //
                                       ", radius = " + circle.Radius.ToString("###.000").PadLeft(7));           //

                txtXYRadius.ScrollToCaret();                // scroll down in text box so most recent line added (at the bottom) will be shown

                // draw a small green circle at the center of the detected object
                CvInvoke.cvCircle(imgOriginal, new Point((int)circle.Center.X, (int)circle.Center.Y), 3, new MCvScalar(0, 255, 0), -1, LINE_TYPE.CV_AA, 0);

                imgOriginal.Draw(circle, new Bgr(Color.Red), 3);        // draw a red circle around the detected object
            }
            ibOriginal.Image = imgOriginal;             // update image boxes on form
            ibProcessed.Image = imgProcessed;           //
        }
Developer ID: almar12, Project: OpenCV_2.4.11_Windows_Installation_Guide, Lines of code: 43, Source file: RedBallTracker.cs

Example 5: getSkinOnImage

        private Image<Gray, byte> getSkinOnImage(Image<Hsv, byte> sourceImage, Hsv Hsv_min, Hsv Hsv_Max)
        {
            Image<Gray, Byte> skin = sourceImage.InRange(Hsv_min, Hsv_Max);

            skin = skin
                .SmoothGaussian(11)
                .Dilate(3)
                .SmoothGaussian(5)
                .Convert<Rgb, Byte>()
                .ThresholdBinary(new Rgb(127, 127, 127), new Rgb(255, 255, 255))
                .Convert<Gray, Byte>();

            return skin;
        }
Developer ID: TBruzdzinski, Project: mmcv-handCTRL, Lines of code: 14, Source file: Form1.cs

Example 6: CalibrateHSV

        private void CalibrateHSV(ref Image<Hsv, Byte> hsvImage, ref DenseHistogram histogram)
        {
            float horizontalFactor = 0.2f;
            float verticalFactor = 0.2f;

            int rectWidth = (int)(hsvImage.Width * horizontalFactor);
            int rectHeight = (int)(hsvImage.Height * verticalFactor);

            int topLeftX = (int)((((float)hsvImage.Width / 2) - rectWidth) / 2);
            int topLeftY = (int)(((float)hsvImage.Height - rectHeight) / 2);

            Rectangle rangeOfInterest = new Rectangle(topLeftX, topLeftY, rectWidth, rectHeight);

            Image<Gray, Byte> maskedImage = hsvImage.InRange(
                new Hsv(hue_min, saturation_min, value_min),
                new Hsv(hue_max, saturation_max, value_max));

            Image<Hsv, byte> partToCompute = hsvImage.Copy(rangeOfInterest);

            int[] h_bins = { 30, 30 };
            RangeF[] h_ranges = {
                new RangeF(0, 180),
                new RangeF(0, 255)
            };
            Image<Gray, byte>[] channels = partToCompute.Split().Take(2).ToArray();

            histogram = new DenseHistogram(h_bins, h_ranges);
            histogram.Calculate(channels, true, null);

            float minValue, maxValue;
            int[] posMinValue, posMaxValue;
            histogram.MinMax(out minValue, out maxValue, out posMinValue, out posMaxValue);
            histogram.Threshold(
                (double)minValue + (maxValue - minValue) * 40 / 100
            );

            hsvImage = maskedImage.Convert<Hsv, Byte>() // a "First chance exception..." is raised here
                .SmoothGaussian(5)
                .Dilate(1)
                .Convert<Rgb, Byte>()
                .ThresholdBinary(new Rgb(127,127,127), new Rgb(255,255,255))
                .Convert<Hsv, Byte>();

            //hsvImage.Draw(rangeOfInterest, new Hsv(255, 255, 255), 3);
        }
Developer ID: TBruzdzinski, Project: mmcv-handCTRL, Lines of code: 45, Source file: Form1.cs

Example 7: pobierzObraz

        void pobierzObraz() // function with an infinite loop, runs in the th_pobierzObraz thread
        {
            for(;;)
            {
                // original image
                obraz1 = kamerka.QueryFrame();
                obraz1_mod = obraz1.Copy();

                // the V channel and a binary image of the light in the camera frame
                Image<Hsv, Byte> obraz1_hsv = new Image<Hsv, byte>(obraz1.Bitmap);
                CvInvoke.cvCvtColor(obraz1, obraz1_hsv, Emgu.CV.CvEnum.COLOR_CONVERSION.BGR2HSV);
                obraz1_v = obraz1_hsv.Split()[2];
                this.Invoke((MethodInvoker)delegate
                {
                   bin_obraz1_bialy = obraz1_v.InRange(new Gray(265 - tbCzulosc.Value), new Gray(240 + tbCzulosc.Value));
                   CvInvoke.cvErode(bin_obraz1_bialy, bin_obraz1_bialy, rect_12, 5);
                   CvInvoke.cvDilate(bin_obraz1_bialy, bin_obraz1_bialy, rect_6, 5);
                   Status.pb2.Image = bin_obraz1_bialy.Bitmap;
                });                

                // the V channel and a binary image of the light in the background
                Image<Hsv, Byte> tlo_hsv = new Image<Hsv, byte>(tlo.Bitmap);
                CvInvoke.cvCvtColor(tlo, tlo_hsv, Emgu.CV.CvEnum.COLOR_CONVERSION.BGR2HSV);
                tlo_v = tlo_hsv.Split()[2];
                bin_tlo_bialy = tlo_v.InRange(new Gray(250), new Gray(255));
                CvInvoke.cvErode(bin_tlo_bialy, bin_tlo_bialy, rect_12, 5);
                CvInvoke.cvDilate(bin_tlo_bialy, bin_tlo_bialy, rect_6, 5);
                Status.pb4.Image = bin_tlo_bialy.Bitmap;

                // difference of the two above
                Image<Gray, Byte> bin_diff = new Image<Gray, byte>(tlo.Bitmap);              
                bin_diff = bin_obraz1_bialy - bin_tlo_bialy;
                Status.pb5.Image = bin_diff.Bitmap;

                // contours in the light region
                MemStorage mem = new MemStorage();
                Contour<Point> kontur_all = bin_diff.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, mem);
                Contour<Point> kontur = null; // the contour that will be taken into account
                // determine the largest contour
                while (kontur_all != null)
                {
                    double rozmiar = 0;
                    if (kontur != null) rozmiar = kontur.Area;
                    if (kontur_all.Area > rozmiar)
                        kontur = kontur_all;
                    kontur_all = kontur_all.HNext;
                }

                if (kontur != null && kontur.Area > 500)
                {
                    kontur = kontur.ApproxPoly(kontur.Perimeter * 0.0025, mem);
                    obraz1_mod.Draw(kontur, new Bgr(Color.Red), 12);
                    st = true;
                }
                else
                {
                    st = false;
                }

                // display the camera image with the marked contours
                pb1.Image = obraz1_mod.Bitmap;
                // display the background
                Status.pb3.Image = tlo.Bitmap;
            }
        }
Developer ID: s9908, Project: naiMorse, Lines of code: 65, Source file: Form1.cs

Example 8: myKinect_ColorFrameReady

        void myKinect_ColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
        {
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (colorFrame == null) return;

                bmap = OpenCV2WPFConverter.ColorImageFrameToBitmap(colorFrame);

                imgBgr = new Image<Bgr, Byte>(bmap);
                imgHsv = new Image<Hsv, Byte>(bmap);

                if (imgBgr == null || imgHsv == null) return;
                processedBgr = imgBgr.InRange(new Bgr(B_min, G_min, R_min), new Bgr(B_max, G_max, R_max));

                processedHsv = imgHsv.InRange(new Hsv(H_min, S_min, V_min), new Hsv(H_max, S_max, V_max));
                //0,130,0 ~ 120, 256, 120 for green color.
                processedBgr = processedBgr.SmoothGaussian(7);
                processedHsv = processedHsv.SmoothGaussian(7);

                CircleF[] circlesBgr = processedBgr.HoughCircles(cannyThreshold, circleAccumulatorThreshold
                    , 2, processedBgr.Height / 8 , 8, 40)[0];

                CircleF[] circlesHsv = processedHsv.HoughCircles(cannyThreshold, circleAccumulatorThreshold
                    , 2, processedHsv.Height / 8, 8, 40)[0];

                HsvCircleCount = 0;
                RgbCircleCount = 0;

                // Draw circles for the RGB video stream
                foreach (CircleF circle in circlesBgr)
                {

                    RgbCircleCount += 1;
                    imgBgr.Draw(circle, new Bgr(System.Drawing.Color.Bisque), 3);

                }

                // Draw Circles for HSV video stream
                foreach (CircleF circle in circlesHsv)
                {

                    HsvCircleCount += 1;
                    imgBgr.Draw(circle, new Bgr(System.Drawing.Color.Bisque), 3);

                }

                kinectVideo.Source = OpenCV2WPFConverter.ToBitmapSource(imgBgr);
                HsvVideo.Source = OpenCV2WPFConverter.ToBitmapSource(processedHsv);
                RgbVideo.Source = OpenCV2WPFConverter.ToBitmapSource(processedBgr);
                //control the distance of different circles!
                this.HsvCircleUI.Content = HsvCircleCount.ToString();
                this.RgbCircleUI.Content = RgbCircleCount.ToString();

            }
        }
Developer ID: handsomesun, Project: hsv-finder, Lines of code: 55, Source file: MainWindow.xaml.cs

Example 9: Form1

        public Form1()
        {
            InitializeComponent();

            Image<Gray, Byte> imgProcessed;
            string src = @"Bilinear_interpolation.png";
            Image<Bgr, Byte> newimgOriginal;

            newimgOriginal = new Image<Bgr, byte>((Bitmap)Bitmap.FromFile(src));

            imgProcessed = newimgOriginal.InRange(new Bgr(0, 0, 0), new Bgr(100, 100, 100));

            //newimgOriginal[1, 1] = newimgOriginal[1, 1];// newimgOriginal[]

            int k = 3;

            Image<Bgr, Byte> zoom_img = new Image<Bgr, byte>(imgProcessed.Width * k, imgProcessed.Height * k);

            for (int i = 0; i < imgProcessed.Height * k; i++)
            {
                for (int j = 0; j < imgProcessed.Width * k; j++)
                {
                    if (i % k == 0)//i
                    {
                        if (j % k == 0)//j
                        {
                            zoom_img[i, j] = newimgOriginal[i / k, j / k];
                        }
                        else// j%k+1
                        {
                            Bgr color_l = newimgOriginal[i / k, j / k];
                            Bgr color_r;
                            if ((j / k) + 1 < newimgOriginal.Width)
                                color_r = newimgOriginal[i / k, (j / k)+1];
                            else
                                color_r = color_l;
                            Bgr bgr = new Bgr((color_l.Blue + color_r.Blue) / 2, (color_l.Green + color_r.Green) / 2, (color_l.Red + color_r.Red) / 2);
                            zoom_img[i, j] = bgr;
                        }
                    }
                    else //i
                    {
                        if (j % k == 0)//j
                        {
                            Bgr color_l = newimgOriginal[i / k, j / k];
                            Bgr color_r;
                            if ((i / k) + 1 < newimgOriginal.Height)
                                color_r = newimgOriginal[Convert.ToInt32((i / k) + 1), j / k];
                            else
                                color_r = color_l;
                            Bgr bgr = new Bgr((color_l.Blue + color_r.Blue) / 2, (color_l.Green + color_r.Green) / 2, (color_l.Red + color_r.Red) / 2);
                            zoom_img[i, j] = bgr;
                        }
                        else //j
                        {
                            Bgr color_l_t = newimgOriginal[i / k, j / k];
                            Bgr color_r_t;
                            if ((i / k) + 1 < newimgOriginal.Height)
                                color_r_t = newimgOriginal[(i / k) + 1, j / k];
                            else
                                color_r_t = color_l_t;
                            Bgr bgr_top = new Bgr((color_l_t.Blue + color_r_t.Blue) / 2, (color_l_t.Green + color_r_t.Green) / 2, (color_l_t.Red + color_r_t.Red) / 2);

                            Bgr bgr_bottom;
                            if ((j / k) + 1 < newimgOriginal.Width)
                            {
                                Bgr color_l_b = newimgOriginal[i / k, (j / k) + 1];
                                Bgr color_r_b;
                                if ((i / k) + 1 < newimgOriginal.Height)
                                    color_r_b = newimgOriginal[(i / k) + 1, (j / k) + 1];
                                else
                                    color_r_b = color_l_b;
                                bgr_bottom = new Bgr((color_l_b.Blue + color_r_b.Blue) / 2, (color_l_b.Green + color_r_b.Green) / 2, (color_l_b.Red + color_r_b.Red) / 2);
                            }
                            else
                                bgr_bottom = bgr_top;

                            Bgr bgr = new Bgr((bgr_top.Blue + bgr_bottom.Blue) / 2, (bgr_top.Green + bgr_bottom.Green) / 2, (bgr_top.Red + bgr_bottom.Red) / 2);

                            zoom_img[i, j] = bgr;
                        }
                    }
                }
            }
            // imgProcessed = matrix_new.ToImage;
            // CvInvoke.ma
            //Image<Bgr, Byte> img = matrix_new.QueryFrame().ToImage<Bgr, Byte>();

            new_img_box.Image = newimgOriginal;
            this.zoom_img_block.Image = zoom_img;
        }
Developer ID: pkt-fit-knu, Project: I22-21, Lines of code: 91, Source file: Form1.cs

Example 10: RefreshWindow

        /* Refresh the window with the image */
        void RefreshWindow(object sender, EventArgs arg)
        {
            // Grab a frame
            image = capture.QueryFrame();
            image = image.Flip(FLIP.HORIZONTAL);
            imageBox1.Image = image;

            //YCbCr or Bgr(RGB)
            // Note that Ycc is ordered Y,Cr,Cb (not Y,Cb,Cr), and Bgr is Blue,Green,Red
            if (radioButton1.Checked)
                imageGray = image.Resize((double)nupScale.Value, INTER.CV_INTER_CUBIC).Convert<Ycc, Byte>().
                                  InRange(new Ycc((double)nudW1.Value, (double)nudW3.Value, (double)nudW2.Value), new Ycc((double)nudW4.Value, (double)nudW6.Value, (double)nudW5.Value));
            else
                imageGray = image.InRange(new Bgr((double)nudW3.Value, (double)nudW2.Value, (double)nudW1.Value), new Bgr((double)nudW6.Value, (double)nudW5.Value, (double)nudW4.Value));

            if (medianCB.Checked)
                imageGray = imageGray.SmoothMedian((int)nudMedian.Value);

            //Image<Gray, Byte> sgm = new Image<Gray, Byte>(imageGray.Size);
            Bitmap bmp = imageGray.ToBitmap();
            bc.ProcessImage(bmp);

            Blob[] blob = bc.GetObjectsInformation();

            //one hand version
            //int iters = bc.ObjectsCount > 1 ? 1 : bc.ObjectsCount;
            int iters = bc.ObjectsCount > 2 ? 2 : bc.ObjectsCount;
            if(iters > 1) {
                //both hands version
                // the left hand is the one on the right side of the image (mirror view); we do not assume the user gestures crosswise, keep it simple
                blob = blob.Take(2).OrderByDescending(a => a.CenterOfGravity.X).ToArray<Blob>();
            }

            int centerOfGravityLHandX = 0, centerOfGravityLHandY = 0, centerOfGravityRHandX = 0, centerOfGravityRHandY = 0;

            string[] gestureLabel = new string[2];
            int i = 0;
            for (; i < iters; ++i) {
                IntPoint minXY, maxXY;
                PointsCloud.GetBoundingRectangle(bc.GetBlobsEdgePoints(blob[i]), out minXY, out maxXY);
                Bitmap clonimage = (Bitmap)bmp.Clone();
                BitmapData data = bmp.LockBits(new Rectangle(0, 0, imageGray.Width, imageGray.Height), ImageLockMode.ReadWrite, bmp.PixelFormat);
                Drawing.Rectangle(data, blob[i].Rectangle, Color.White);
                bmp.UnlockBits(data);

                int X = maxXY.X, Y = maxXY.Y;
                int x = minXY.X, y = minXY.Y;

                observed[0,i] = blob[i].Fullness;

                /* the stock Malinowska coefficient computes the perimeter from the rectangle formula; ours counts edge pixels */
                //Malinowska(i) = (2*bb(3)+2*bb(4))/(2*sqrt(pi*S)) - 1;
                observed[2,i] = (double)(bc.GetBlobsEdgePoints(blob[i]).Count) / 2 / Math.Sqrt(Math.PI * blob[i].Area) - 1;
                //MalinowskaZ(i) = 2*sqrt(pi*S)/(2*bb(3)+2*bb(4));
                observed[3,i] = 2 * Math.Sqrt(Math.PI * blob[i].Area) / (double)(bc.GetBlobsEdgePoints(blob[i]).Count);

                int gx = (int)blob[i].CenterOfGravity.X, gy = (int)blob[i].CenterOfGravity.Y;

                // Check which hand is the right one and which is the left
                if (gx > centerOfGravityRHandX)
                {
                    centerOfGravityLHandX = centerOfGravityRHandX;
                    centerOfGravityLHandY = centerOfGravityRHandY;
                    centerOfGravityRHandX = gx;
                    centerOfGravityRHandY = gy;
                }
                else
                {
                    centerOfGravityLHandX = gx;
                    centerOfGravityLHandY = gy;
                }

                double blairsum = 0;
                int ftx = 0, ftxMax = 0;

                byte[, ,] dd = imageGray.Data;
                for (int j = y; j < Y; ++j) {
                    if (ftx > ftxMax) ftxMax = ftx;
                    ftx = 0; // because it may count more than one run at once, see: a perfect rectangle
                    for (int k = x; k < X; ++k) {
                        if (dd[j, k, 0] != 0) {
                            ++ftx;
                            blairsum += (k - gx) * (k - gx) + (j - gy) * (j - gy);//distance squared
                        } else {
                            if (ftx > ftxMax) ftxMax = ftx;
                            ftx = 0;
                        }
                        dd[j, k, 0] = 255;
                    }
                }

                /*    to compute ftyMax one more loop would be needed, with the inner loop running over y,
                    * but ftyMax can also be approximated by boundingbox.Y; in that case
                    * I expected the largest discrepancies for slanted or holey objects,
                    * but the error was below one percent, so it is not worth another O(n*n) pass

                int fty = 0, ftyMax = 0;
                for (int j = x; j < X; ++j) {
                    if (fty > ftyMax) ftyMax = fty;
//......... part of the code omitted here .........
Developer ID: rampler, Project: KameraMyszkaEmguCV, Lines of code: 101, Source file: KameraMyszka.cs

Example 11: getBlueHsvMask

 public Image<Gray, Byte> getBlueHsvMask(Image<Hsv, Byte> src, Hsv blue_min, Hsv blue_max)
 {
     Image<Gray, Byte> TMP = new Image<Gray, byte>(src.Width, src.Height);
       TMP = src.InRange(blue_min, blue_max);
       return TMP;
 }
Developer ID: Lerbytech, Project: ShapeDetection, Lines of code: 6, Source file: MainForm.cs

Example 12: FindCirclesAndLinesWithHough

        public static Bitmap FindCirclesAndLinesWithHough(Image<Bgr, byte> source)
        {
            frameBgr = source;

            frameGray = frameBgr[0]
                .PyrDown()
                .Dilate(2)
                .Erode(2)
                .ThresholdBinary(VideoParameters.Default.GrayFrameThresholdLow, VideoParameters.Default.GrayFrameThresholdHigh)
                .PyrUp();

            frameCanny = HoughTransform.GetCanny(frameGray);

            var blackGray = new Gray(0);
            var blackBgr = new Bgr(Color.Black);
            frameCanny.SetValue(blackGray, RobotMask);

            var lines = HoughTransform.GetLines(frameCanny);
            Lines = lines;

            var height = VisionData.FrameSize.Height;
            foreach (var line in lines)
            {
                if (line.Length < 10 /*&& IsLineWhite(frameBgr, line)*/)
                    continue;

                var polygon = new[]
                {
                    new Point(line.P1.X, line.P1.Y + 5),
                    new Point(line.P1.X, 0),
                    new Point(line.P2.X, 0),
                    new Point(line.P2.X, line.P2.Y + 5)
                };
                /*
                var newLine = GetEdgeLine(line.P1, line.P2);
                var polygon = new[]
                {
                    new Point(newLine.P1.X, newLine.P1.Y),
                    new Point(newLine.P1.X, newLine.P1.Y - height),
                    new Point(newLine.P2.X, newLine.P2.Y - height),
                    new Point(newLine.P2.X, newLine.P2.Y)
                };
                 */

                frameCanny.FillConvexPoly(polygon, blackGray);
                frameBgr.FillConvexPoly(polygon, blackBgr);
                //frameCanny.Draw(line, new Gray(0), 5);
            }

            //Lines = HoughTransform.FilterLines(lines);
            Circles = HoughTransform.GetCircles(frameCanny.Bitmap);

            //var points = EdgeFinder.GetTopArea(blue, lines);
            //canny.FillConvexPoly(points.ToArray(), new Gray(155));

            //var contours = EdgeFinder.GetContours(canny);
            //foreach (var contour in contours)
            //	canny.FillConvexPoly(contour.ToArray(), new Gray(150));

            // HACK: Testing))
            switch (channel)
            {
                default:
                case 1:
                    return frameBgr.Bitmap;
                case 2:
                    return frameGray.Convert<Bgr, byte>().Bitmap;
                case 3:
                    return frameCanny.Convert<Bgr, byte>().Bitmap;
                case 4:
                    return new Image<Bgr, byte>(HoughTransform.CircleTransformation.ToBitmap()).Bitmap;
                case 5:
                    return frameBgr.CopyBlank().Bitmap;
                case 6:
                    var frame = frameBgr.InRange(
                        new Bgr(0, 0, 50),
                        new Bgr(VideoParameters.Default.RedThresholdBlue, VideoParameters.Default.RedThresholdGreen, VideoParameters.Default.RedThresholdRed));
                    return frame
                        .Dilate(3).Erode(6).Dilate(3)
                        .Convert<Bgr, byte>().Bitmap;
                case 7:
                    var frame2 = frameBgr.InRange(
                        new Bgr(50, 0, 0),
                        new Bgr(VideoParameters.Default.BlueThresholdBlue, VideoParameters.Default.BlueThresholdGreen, VideoParameters.Default.BlueThresholdRed));
                    return frame2
                        .Dilate(3).Erode(6).Dilate(3)
                        .Convert<Bgr, byte>().Bitmap;
                case 8:
                    var rectanglesRed = FindRedGoalRectangles();
                    var i = 1;
                    foreach (var rectangle in rectanglesRed.OrderBy(x => x.Size.Width))
                    {
                        frameBgr.Draw(rectangle, new Bgr(Color.Red), 3);
                        frameBgr.Draw(i.ToString(), ref Font, rectangle.Location + new Size(10, 10), new Bgr(Color.DarkRed));
                    }

                    var rectanglesBlue = FindBlueGoalRectangles();
                    i = 1;
                    foreach (var rectangle in rectanglesBlue.OrderBy(x => x.Size.Width))
                    {
//......... part of the code omitted here .........
Developer ID: martikaljuve, Project: Robin, Lines of code: 101, Source file: VisionExperiments.cs

Example 13: SearchForObjects

        public Rectangle SearchForObjects(Image<Hsv, Byte> hsv)
        {
            rectangles = new List<Rectangle>();
            for (int i = 0; i < colors.Count; i= i+2)
            {
                Image<Gray, Byte> rangedImg = hsv.InRange(colors[i], colors[i + 1]);
                Rectangle rectangle = rec.getBoundingBox(rangedImg);
                if (rectangle.Height != 0)
                {

                    rectangles.Add(rectangle);
                }
            }
            if (rectangles.Count == 0)
                return new Rectangle(0, 0, 0, 0);
            else
                return BiggestRectangle(rectangles);
        }
Developer ID: boschbc, Project: NaoRobot, Lines of code: 18, Source file: Processing.cs

Example 14: ColorGrabber

        void ColorGrabber(object sender, EventArgs e)
        {
            currentFrame = grabber.QueryFrame();
            if (currentFrame != null)
            {
                currentFrameCopy = currentFrame.Copy();

                Image<Hsv, Byte> hsvimg = currentFrameCopy.Convert<Hsv, Byte>();
                Image<Gray, Byte>[] channels = hsvimg.Split();
                Image<Gray, Byte> imghue = channels[0];
                Image<Gray, Byte> huefilter = currentFrame.InRange(new Bgr(100,0,0),new Bgr(256,100,100));

                imageBoxSkin.Image = huefilter;
                imageBoxFrameGrabber.Image = currentFrame;
                ExtractContourAndHull(huefilter);
               // DrawAndComputeFingersNum();
                if(huefilter == currentFrame.InRange(new Bgr(100,0,0),new Bgr(256,100,100)) )
                { blue = 6; }

            }
        }
Developer ID: nikhilemmanuel, Project: Gesturised_Computer_Operation, Lines of code: 21, Source file: Form1.cs

Example 15: binaryThresholdNiBlack

        private Image<Gray, Byte> binaryThresholdNiBlack(Image<Gray, Byte> handImage)
        {
            Gray media;
            MCvScalar desest;
            MCvScalar mediaValue;
            double cv; 
            double Kder = 2;
            double Kizq = 0.2;
            double Rizq = 0;
            double Rder = 0; 


            handImage.AvgSdv(out media, out desest);
            mediaValue = media.MCvScalar;

            cv = desest.v0 / mediaValue.v0;

            if (mediaValue.v0 < 30.0)
                Kder = 2.3; 

            Rizq = mediaValue.v0 - (Kizq * desest.v0);
            Rder = mediaValue.v0 + (Kder * desest.v0); 

            //saveStatictics(numFrames, mediaValue.v0, desest.v0, cv, Rizq, Rder); 

            handImage = handImage.InRange(new Gray(Rizq), new Gray(Rder));
            handImage.Save(path2 + numFrames.ToString() + "_Inv.png");  
            //handImage = handImage.Not(); 

            //handImage = handImage.ThresholdBinary(media, new Gray(255));

            handImage = closeOperation(handImage); 
            handImage = openingOperation(handImage);  

            return handImage;
        }//end BinaryThresholdNiBlack  
Developer ID: americamm, Project: SystemVersions, Lines of code: 36, Source file: HandSegmentation.cs


Note: The Image.InRange method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.