

C# Image.Erode Method Code Examples

This article collects typical usage examples of the Image.Erode method in C#. If you have been wondering how exactly C# Image.Erode works, how to call it, or what real-world uses look like, the hand-picked code examples here should help. You can also explore further usage examples of the Image class that this method belongs to.


Fifteen code examples of the Image.Erode method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
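Before diving into the collected examples, here is a minimal, self-contained sketch of the basic call pattern, assuming Emgu CV 2.x (the OpenCV wrapper used throughout the examples below); the file names and iteration counts are illustrative only:

        using System;
        using Emgu.CV;
        using Emgu.CV.Structure;

        class ErodeDemo
        {
            static void Main()
            {
                // Load a color image (the path is illustrative)
                using (Image<Bgr, byte> input = new Image<Bgr, byte>("input.png"))
                {
                    // Erode with 2 iterations: dark regions grow and bright regions
                    // shrink. Erode returns a new image; the source is untouched.
                    Image<Bgr, byte> eroded = input.Erode(2);

                    // A common follow-up: dilate by the same amount ("opening"),
                    // which removes small bright specks but keeps larger shapes
                    Image<Bgr, byte> opened = eroded.Dilate(2);

                    opened.Save("eroded.png");
                }
            }
        }

As the examples below show, Erode is rarely used alone: it is typically combined with Dilate, smoothing, and thresholding to clean up an image or binary mask before contour or blob detection.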

Example 1: ProcessFrame

		public void ProcessFrame(int threshold)
		{
			m_OriginalImage = m_Capture.QueryFrame();

			m_ClippedImage = m_OriginalImage.Copy(this.RegionOfInterest);

			// Make the dark portions bigger
			m_ErodedImage = m_ClippedImage.Erode(1);

			//Convert the image to grayscale
			m_GrayImage = m_ErodedImage.Convert<Gray, Byte>();

			m_BlackAndWhiteImage = m_GrayImage.ThresholdBinaryInv(new Gray(threshold), new Gray(255));

			FindRectangles(m_BlackAndWhiteImage);

			this.FoundRectangleCount = m_FoundRectangles.Count;
			if (this.FoundRectangleCount == m_ImageModel.ExpectedRectangleCount)
			{
				m_ImageModel.AssignFoundRectangles(m_FoundRectangles);
				m_FoundRectanglesImage = CreateRectanglesImage(m_ImageModel.GetInsideRectangles());
			}
			else
			{
				m_FoundRectanglesImage = CreateRectanglesImage(m_FoundRectangles);
			}
		}
Developer: eldb2, Project: robotic-tic-tac-toe-lynxmotion, Lines: 27, Source: MainFormModel.cs

Example 2: ProcessColorImage

        public override Image ProcessColorImage(Bitmap frame, ToteDetectionType detectionType, bool detectBins)
        {
            Image<Bgr, Byte> img = new Image<Bgr, Byte>(frame);

            //// Get The Thresh Image With Given Values
            //Image<Gray, byte> thresh = (threshData as BgrThreshData).InRange(img);
            //// Pixelate Image
            //threshData.Blur(ref thresh);
            //
            //
            //Image ret = base.AnalyzeImage(thresh, detectionType, detectBins);


            //frame.Dispose();
            //thresh.Dispose();

            img = img.SmoothMedian(11);
            img = img.SmoothGaussian(11);
            img = img.Erode(15);
            img = img.Dilate(10);

            // Try this: img.HoughCircles();

            Image<Gray, byte> thresh = img.InRange(new Bgr(110, 130, 100), new Bgr(164, 166, 181));
            Contour<Point> contour = thresh.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
            List<Contour<Point>> playingBalls = new List<Contour<Point>>();
            while (contour != null)
            {
                // filter contours
                // convex hull contours
                if (contour.Area > 50)
                    playingBalls.Add(contour);

                contour = contour.HNext;
            }
            float resolutionOffset = ((float)thresh.Width * thresh.Height) / (640.0f * 480.0f);


            foreach (Contour<Point> ball in playingBalls)
            {
                img.Draw(ball, new Bgr(255, 0, 0), 3);
                // draw the remaining contours and their min enclosing circles (on img)
            }

            Image ret = img.ToBitmap();
            img.Dispose();
            return ret;
        }
Developer: GreenBlitz4590Programmers, Project: StrongHoldVision, Lines: 52, Source: ImageProcessor.cs

Example 3: ProcessFrame

		public void ProcessFrame(int threshold)
		{
			DisposeImages();
			m_OriginalImage = m_Capture.QueryFrame();

			m_ClippedImage = m_OriginalImage.Copy(this.RegionOfInterest);
			//m_ClippedImage.PyrDown().PyrUp();

			//Image<Gray, Byte>[] channels = new Image<Gray,byte>[]
			//{
			//    m_ClippedImage[0],
			//    m_ClippedImage[1],
			//    m_ClippedImage[2]
			//};

			//for (int i = 0; i < 3; i++)
			//{
			//    channels[i]._EqualizeHist();
			//}
			//m_ClippedImage[0]._EqualizeHist();
			//m_ClippedImage[1]._EqualizeHist();
			//m_ClippedImage[2]._EqualizeHist();

			//m_WhiteBalancedImage = channels[2]; // new Image<Bgr, byte>(channels);

			// Make the dark portions bigger
			m_ErodedImage = m_ClippedImage.Erode(1);

			//StructuringElementEx structuringElementEx= new StructuringElementEx(new int[1,1], 0,0);
			//m_WhiteBalancedImage = m_ErodedImage.MorphologyEx(structuringElementEx, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_BLACKHAT, 2);   //.Erode(1);

			//Convert the image to grayscale
			m_GrayImage = m_ErodedImage.Convert<Gray, Byte>(); //.PyrDown().PyrUp();

			//Bgr threshold = new Bgr(127, 127, 127);
			//Bgr maxValue = new Bgr(255, 255, 255);
			m_BlackAndWhiteImage = m_GrayImage.ThresholdBinaryInv(new Gray(threshold), new Gray(255));

			List<MCvBox2D> foundRectangles = FindRectangles(m_BlackAndWhiteImage);
			//Debug.WriteLine(foundRectangles.Count);
			//if (foundRectangles.Count != m_ImageModel.ExpectedRectangleCount)
			//{
			//    // not all required rectangles found
			//    return;
			//}

			//m_ImageModel.AssignFoundRectangles(foundRectangles);
			////AssignFoundRectangles(foundRectangles);
			//m_FoundRectanglesImage = CreateRectanglesImage();
			//RecordGamePieces();
		}
Developer: eldb2, Project: robotic-tic-tac-toe-lynxmotion, Lines: 51, Source: BoardVision.cs

Example 4: Run

        public void Run()
        {
            base.Output = new cImage(Input.Width, Input.Height, Input.Depth, base.ListChannelsToBeProcessed.Count);
            for (int IdxChannel = 0; IdxChannel < base.ListChannelsToBeProcessed.Count; IdxChannel++)
            {
                int CurrentChannel = base.ListChannelsToBeProcessed[IdxChannel];

                Image<Gray, float> inputImage = new Image<Gray, float>(Input.Width, Input.Height);

                // Copy the flattened single-channel buffer into an OpenCV image
                for (int j = 0; j < Input.Height; j++)
                    for (int i = 0; i < Input.Width; i++)
                        inputImage.Data[j, i, 0] = Input.SingleChannelImage[CurrentChannel].Data[i + j * Input.Width];

                // Erode returns a new image, so no separate allocation is needed
                Image<Gray, float> ProcessedImage = inputImage.Erode(this.Iterations);

                this.Output.SingleChannelImage[IdxChannel].SetNewDataFromOpenCV(ProcessedImage);
            }

            return;
        }
Developer: cyrenaique, Project: HCSA, Lines: 21, Source: cImageMorphoErode.cs

Example 5: DetectSquares

        public static DetectionData DetectSquares(Image<Gray, byte> src, string detectionWindow = "")
        {
            for (int i = 0; i < 1; i++)
            {
                src = src.PyrDown();
                src = src.PyrUp();
            }
            src = src.Erode(1);

            Gray cannyThreshold = new Gray(255);
            Gray cannyThresholdLinking = new Gray(1);

            Image<Gray, Byte> cannyEdges = src.Canny(cannyThreshold.Intensity,cannyThresholdLinking.Intensity,3);
            LineSegment2D[] lines = cannyEdges.HoughLinesBinary(
                    1, //Distance resolution in pixel-related units
                    Math.PI / 45.0, //Angle resolution measured in radians.
                    20, //threshold
                    30, //min Line width
                    10 //gap between lines
                    )[0]; //Get the lines from the first channel

            List<Rectangle> rectanglesList = new List<Rectangle>();

            using (var storage = new MemStorage())
            {
                for (Contour<Point> contours = cannyEdges.FindContours(); contours != null; contours = contours.HNext)
                {
                    Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);

                    if (currentContour.BoundingRectangle.Height * currentContour.BoundingRectangle.Width > 50) //only consider contours whose bounding rectangle covers more than 50 pixels
                    {
                        if (currentContour.Total >= 4) //The contour has at least 4 vertices.
                        {
                            var boundingRectangle = currentContour.BoundingRectangle;
                            if (!rectanglesList.Exists(rect => rect.IntersectsWith(boundingRectangle)))
                                rectanglesList.Add(boundingRectangle);
                        }
                    }
                }
            }
            ShowInNamedWindow(cannyEdges, detectionWindow);
            return new DetectionData(rectanglesList, src);
        }
Developer: DormantDreams, Project: video-game-level-scanner, Lines: 43, Source: ImageTools.cs

Example 6: Compute

        public static int Compute(ref Image<Gray, byte> imageSource)
        {
            Image<Gray, byte> imageProcessed = imageSource.Erode(1);    //erode image to prevent connected fingertips and other image defects

            //crop image for faster processing and noise cancellation, since right hand is always going to show up in the left of the image
            Image<Gray, byte> imageCropped = new Image<Gray, byte>(imageWidthCropped, imageHeightCropped);
            for (int i = 0; i < imageWidthCropped; i++)
                for (int j = 0; j < imageHeightCropped; j++)
                    imageCropped.Data[j, i, 0] = imageProcessed.Data[j, i, 0];

            //locating palm by eroding fingers (smoothing, equalizing, then thresholding is better than conventional erosion) and dilating the final palm shape
            Image<Gray, byte> imagePalm = imageCropped.Clone();
            imagePalm._SmoothGaussian(9);
            imagePalm._EqualizeHist();
            imagePalm._ThresholdBinary(new Gray(254), white);
            imagePalm = imagePalm.Dilate(5);

            blobDetectorPalm.Compute(ref imagePalm, 255);    //marking biggest blob (connected pixels cluster) as the palm blob

            //crop the palm blob to exact height using custom heuristics
            int medianWidth = blobDetectorPalm.BlobMaxArea.ComputeMedianWidth();
            int palmCropY = (int)(blobDetectorPalm.BlobMaxArea.YMin + medianWidth * 1.5d);

            List<Point> palmPoints = new List<Point>();
            foreach (Point pt in blobDetectorPalm.BlobMaxArea.Data)
                if (pt.Y < palmCropY)
                {
                    palmPoints.Add(pt);
                    imageCropped.Data[pt.Y, pt.X, 0] = 191;
                }

            //finding the border pixels of palm blob by checking if the pixel is bordering a white pixel
            List<Point> palmBlobBorder = new List<Point>();
            foreach (Point pt in palmPoints)
            {
                int xMin = pt.X - 1, xMax = pt.X + 1,    //8 points surrounding pt
                    yMin = pt.Y - 1, yMax = pt.Y + 1;
                checkBounds(ref xMin, ref xMax, imageWidthCropped);    //check if values are out of bounds of imageCropped
                checkBounds(ref yMin, ref yMax, imageHeightCropped);

                bool kill = false;
                for (int i = xMin; i <= xMax; i++)    //survey 8 points surrounding pt
                {
                    for (int j = yMin; j <= yMax; j++)
                        if (imageCropped.Data[j, i, 0] == 255)    //detect pixels that border white pixels
                        {
                            palmBlobBorder.Add(pt);
                            kill = true;
                            break;
                        }

                    if (kill) break;
                }
            }

            foreach (Point pt in palmBlobBorder) imageCropped.Data[pt.Y, pt.X, 0] = 255;   //setting the color of palm border pixels to white to avoid impeding the progress of the Bresenham line algorithm

            double minLineLength = 0d;    //minimum length in order to be marked as line that travels to a fingertip
            List<LineSegment2D> lines = new List<LineSegment2D>();

            //path-finding algorithm: find all straight lines that originate from the palm border and travel to the hand shape border
            foreach (Point pt in palmBlobBorder)
                for (int i = 340; i >= 200; i--)    //radiate lines between angles 200 and 340 (upwards between 20 and 160)
                {
                    Point ptResult;
                    double angle = i * Math.PI / 180d;
                    Point ptCircle = getCircumferencePoint(pt, 160, angle);    //end point of the line (opposing the origin end)
                    int x = pt.X, y = pt.Y, x2 = ptCircle.X, y2 = ptCircle.Y;

                    if (bresenham(ref x, ref y, ref x2, ref y2, ref imageCropped, out ptResult))    //radiate a line between the origin and end points
                    {
                        LineSegment2D line = new LineSegment2D(ptResult, pt);
                        lines.Add(line);
                        minLineLength += line.Length;    //add current line length to minLineLength since the latter is average length times a coefficient
                    }
                }

            //filter fingerlines to remove ones that do not travel to fingertips, then draw fingerlines that are left onto an image and run blob detection to find finger blobs
            minLineLength = minLineLength / lines.Count * 2.5d;
            Image<Gray, byte> imageFingers = new Image<Gray, byte>(imageWidthCropped, imageHeightCropped);    //new image where all lines that travel to fingertips will be drawn

            foreach (LineSegment2D line in lines)
                if (line.Length > minLineLength || line.P1.X == 0)
                        imageFingers.Draw(line, new Gray(255), 1);    //draw finger lines that are longer than minLineLength, or if fingerline borders the left edge of the image in case the finger isn't fully within view

            imageFingers._SmoothGaussian(3);
            imageFingers._ThresholdBinary(new Gray(254), white);    //smooth drawn fingerlines into finger blobs
            blobDetectorFingers.Compute(ref imageFingers, 255);
            int fingersCount = 0;

            if (blobDetectorFingers.Blobs.Count > 1)
            {
                //heuristics for eliminating false blobs, specifically needed when a fist is shown
                foreach (Blob blob in blobDetectorFingers.Blobs)
                    if (blob.ComputeMaxWidth() >= 2 && blob.Length / (blob.ComputeMedianWidth() + 1) >= 3)
                    {
                        int verificationCount = 0;
                        foreach (Blob blobCurrent in blobDetectorFingers.Blobs)
                            if (blob != blobCurrent && blob.YMin < blobCurrent.YMax)
                                verificationCount++;
//.........part of this code omitted.........
Developer: sohguodong, Project: HandsFree, Lines: 101, Source: GestureReader.cs

Example 7: Filter

        private void Filter()
        {
            // Create thresholds
            Hsv threshold_lower = new Hsv(Color_spot.Hue - 25, 100, 100);
            Hsv threshold_higher = new Hsv(Color_spot.Hue + 25, 240, 240);

            // Blur image and find colors between thresholds
            Image_filtered = Image_transformed.Convert<Hsv, Byte>().SmoothBlur(20, 20).InRange(threshold_lower, threshold_higher);

            // Increase size of the spot and remove possible hole where color was too bright
            Image_filtered = Image_filtered.Dilate(5);

            // Decrease size again a little, makes it smoother
            Image_filtered = Image_filtered.Erode(3);
        }
Developer: Ben-Kaniobi, Project: Laserboard, Lines: 15, Source: Form1.cs

Example 8: ApplyEffect

		private BitmapSource ApplyEffect(EffectType effect, byte[] pixelData, System.Drawing.Bitmap bitmap, Image<Bgra, byte> ocvImage, List<Rectangle> effectRegions)
		{

			// lock the bitmap for writing
			BitmapData data = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height),
												ImageLockMode.WriteOnly, bitmap.PixelFormat);

			// copy the data from pixelData to BitmapData
			Marshal.Copy(pixelData, 0, data.Scan0, pixelData.Length);

			// unlock the bitmap
			bitmap.UnlockBits(data);

			// assign the bitmap to the OpenCV image
			ocvImage.Bitmap = bitmap;

			if(effect != EffectType.None)
			{
				foreach(Rectangle effectRegion in effectRegions)
				{
					// set the Region of Interest based on the joint
					ocvImage.ROI = effectRegion;

					// temp image to hold effect output
					Image<Bgra, byte> ocvTempImg;

					switch(effect)
					{
						case EffectType.Blur:
							ocvTempImg = ocvImage.SmoothBlur(20, 20);
                            break;
						case EffectType.Dilate:
							ocvTempImg = ocvImage.Dilate(5);
							break;
						case EffectType.Erode:
                            ocvTempImg = ocvImage.Erode(5);
							break;
						case EffectType.Edge:
							Image<Gray, byte> gray = ocvImage.Convert<Gray, byte>();
							gray = gray.SmoothBlur(3, 3);
                            gray = gray.Canny(30.0f, 50.0f);
							ocvTempImg = gray.Convert<Bgra, byte>();
							break;
						default:
							throw new ArgumentOutOfRangeException("effect");
					}

					// copy the effect area to the final image
					CvInvoke.cvCopy(ocvTempImg, ocvImage, IntPtr.Zero);
				}
			}

			// reset the Region of Interest
			ocvImage.ROI = Rectangle.Empty;

            #region Convert System.Drawing.Bitmap to WPF BitmapSource
            // get a bitmap handle from the OpenCV image
            IntPtr hBitmap = ocvImage.ToBitmap().GetHbitmap();

            // convert that handle to a WPF BitmapSource
            BitmapSource bitmapSource = Imaging.CreateBitmapSourceFromHBitmap(hBitmap, IntPtr.Zero, Int32Rect.Empty,
                                                                                BitmapSizeOptions.FromWidthAndHeight(
                                                                                bitmap.Width, bitmap.Height));
            // delete the bitmap
            DeleteObject(hBitmap); 
            #endregion

			return bitmapSource;
		}
Developer: Cocotus, Project: kinect, Lines: 69, Source: MainWindow.xaml.cs

Example 9: RefreshWindow


//.........part of this code omitted.........
                }
                feret = (double)ftxMax / ftyMax; */
                observed[4,i] = (double)ftxMax / (Y-y);//feret
                observed[1,i] = (double)(blob[i].Area) / Math.Sqrt(2 * Math.PI * blairsum);//blair

                gestChance[GEST.SLAYER] = dist(slayer, i);
                gestChance[GEST.THUMBLEFT] = dist(thumbleft, i);
                gestChance[GEST.THUMBUP] = dist(thumbup, i);
                gestChance[GEST.SHAKA] = dist(shaka, i);
                gestChance[GEST.FIST] = dist(fist, i);
                gestChance[GEST.VICTORY] = dist(victory, i);
                gestChance[GEST.VOPEN] = dist(vopen, i);
                gestChance[GEST.HOPEN] = dist(hopen, i);
                gestChance[GEST.FINGERS] = dist(fingers, i);
                gestChance[GEST.SCISSORS] = dist(scissors, i);

                //list fold - get key of minimal value
                KeyValuePair<GEST,double> elem = gestChance.Aggregate((l, r) => l.Value < r.Value ? l : r);
                found[i] = (elem.Value < TOLERANCE) ? elem.Key : GEST.BLANK;
                if (elem.Key == GEST.FIST && (double)(X-x)/(Y-y) < .6) {
                    found[i] = GEST.VOPEN;
                }
                gestureLabel[i] = labels[(int)found[i]];
            }

                g1value.Text = gestureLabel[1];
                g2value.Text = gestureLabel[0];

                compactnessLbl.Text = observed[0, 1].ToString(format);
                blairLbl.Text = observed[1, 1].ToString(format);
                malinowskaLbl.Text = observed[2, 1].ToString(format);
                malzmodLbl.Text = observed[3, 1].ToString(format);
                feretLbl.Text = observed[4, 1].ToString(format);

                comp2.Text = observed[0, 0].ToString(format);
                blair2.Text = observed[1, 0].ToString(format);
                mal2.Text = observed[2, 0].ToString(format);
                malz2.Text = observed[3, 0].ToString(format);
                feret2.Text = observed[4, 0].ToString(format);

            /* for blobs not detected */
            for (; i < 2; ++i) {
                observed[0, i] = observed[1, i] = observed[2, i] = observed[3, i] = observed[4, i] = NOT_FOUND;
            }

            imageGray = new Image<Gray, Byte>(bmp);
            imageGray = imageGray.Erode((int)nudErode.Value);
            imageGray = imageGray.Dilate((int)nudDilate.Value);
            imageBox2.Image = imageGray;

            //Change the mouse position based on the center of gravity of the left hand
            if (centerOfGravityLHandX * centerOfGravityLHandY != 0 && !blockMouseControl)
            {
                double smoothness = (double)nudSmoothness.Value;
                double sensitivity = (double)nudSensitivity.Value;
                int newPositionX = screenWidth - (int)(centerOfGravityLHandX / (imageGray.Width * .2) * sensitivity * screenWidth); //- imageGray.Width*1/5
                int newPositionY = (int)((centerOfGravityLHandY - imageGray.Height * .5) / (imageGray.Height * .25) * sensitivity * screenHeight);

                int diffX = Cursor.Position.X + newPositionX;
                int diffY = Cursor.Position.Y - newPositionY;

                newPositionX = Cursor.Position.X - (int)(diffX / smoothness);
                newPositionY = Cursor.Position.Y - (int)(diffY / smoothness);
                MouseSimulating.SetMousePosition(newPositionX, newPositionY);

                //Determine which action to take
                if (found[1] == GEST.BLANK || prevGestureLeft != found[1]) {
                    frameCounterLeft = 0;
                    prevGestureLeft = found[1];
                }

                if (found[0] == GEST.BLANK || prevGestureRight != found[0]) {
                    frameCounterRight = 0;
                    prevGestureRight = found[0];
                }

                if (frameCounterLeft == 30) //number of frames the gesture must be held - 30 fps
                {
                    if (prevGestureLeft == GEST.FIST) MouseSimulating.PressLPM();
                    else if (prevGestureLeft == GEST.VOPEN) MouseSimulating.ReleaseLPM();
                    frameCounterLeft = 0;
                } else frameCounterLeft++;

                if (frameCounterRight == 30) {
                    if (prevGestureRight == GEST.FIST) MouseSimulating.ClickLPM();
                    else if (prevGestureRight == GEST.SLAYER) MouseSimulating.ScrollUP(200);
                    else if (prevGestureRight == GEST.VICTORY) MouseSimulating.ScrollDOWN(200);
                    else if (prevGestureRight == GEST.FINGERS) MouseSimulating.ClickPPM();
                    else if (prevGestureRight == GEST.THUMBUP) KeyboardSimulating.SendCtrlC();
                    else if (prevGestureRight == GEST.THUMBLEFT) KeyboardSimulating.SendCtrlV();
                    else if (prevGestureRight == GEST.SCISSORS) KeyboardSimulating.SendCtrlX();
                    else if (prevGestureRight == GEST.HOPEN) { MouseSimulating.ClickLPM(); MouseSimulating.ClickLPM(); }
                    else if (prevGestureRight == GEST.SHAKA) MouseSimulating.ClickMouseButton4();

                    frameCounterRight = 0;
                }
                else frameCounterRight++;

            }
        }
Developer: rampler, Project: KameraMyszkaEmguCV, Lines: 101, Source: KameraMyszka.cs

Example 10: ProcessAndView

        public override Image<Rgb, byte> ProcessAndView(Image<Rgb, byte> image)
        {
            image = IsFirstErodeThenDilate ? image.Erode(NumErode).Dilate(NumDilate) : image.Dilate(NumDilate).Erode(NumErode);

            return image;
        }
Developer: AlternateIf, Project: huddle-engine, Lines: 6, Source: ErodeDilate.cs
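A note on the ordering toggled in Example 10: erode-then-dilate is morphological opening (it removes small bright specks), while dilate-then-erode is closing (it fills small dark holes). A minimal sketch of both, assuming Emgu CV 2.x and an illustrative input path:

            // Assumes a binary (grayscale) mask loaded from an illustrative path
            Image<Gray, byte> mask = new Image<Gray, byte>("mask.png");

            // Opening (erode first): removes small bright specks
            Image<Gray, byte> opened = mask.Erode(2).Dilate(2);

            // Closing (dilate first): fills small dark holes in bright regions
            Image<Gray, byte> closed = mask.Dilate(2).Erode(2);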

Example 11: Filter

        private void Filter()
        {
            /*//Get red
            Image_filtered = Image_transformed.SmoothBlur(5, 5).InRange(new Bgr(Color.DarkRed), new Bgr(Color.White));//Color.FromArgb(255, 100, 100)));
            box_filtered.Image = Image_filtered.ToBitmap();*/

            //Get average color (HSV) of the point area
            Rectangle rect = new Rectangle(300, 235, 25, 25); //Only for test image!
            Hsv average = Image_transformed.GetSubRect(rect).Convert<Hsv, Byte>().GetAverage();

            //Create thresholds
            Hsv threshold_lower = new Hsv(average.Hue -25, 100, 100);
            Hsv threshold_higher = new Hsv(average.Hue +25, 240, 240);

            //Blur image and find colors between thresholds
            Image_filtered = Image_transformed.Convert<Hsv, Byte>().SmoothBlur(20,20).InRange(threshold_lower, threshold_higher);

            //Reduce size of the point and display image
            Image_filtered = Image_filtered.Erode(4);
            box_filtered.Image = Image_filtered.ToBitmap();
        }
Developer: shanky93, Project: Laserboard, Lines: 21, Source: Form1.cs

Example 12: UpdateOccludedObjects

        /// <summary>
        /// Tries to find objects that are occluded.
        /// </summary>
        /// <param name="image">Source color image.</param>
        /// <param name="outputImage">Image the tracking results are drawn into.</param>
        /// <param name="updateTime">Timestamp of the current update cycle.</param>
        /// <param name="objects">All currently tracked objects.</param>
        private void UpdateOccludedObjects(Image<Rgb, byte> image, ref Image<Rgb, byte> outputImage, DateTime updateTime, RectangularObject[] objects)
        {
            var imageWidth = image.Width;
            var imageHeight = image.Height;

            var mask = new Image<Gray, byte>(imageWidth, imageHeight);

            var occludedObjects = objects.Where(o => !Equals(o.LastUpdate, updateTime)).ToArray();

            // nothing to do if no objects are occluded, unless IsRenderContent is set and the debug view still needs updating
            if (occludedObjects.Length < 1 && !IsRenderContent)
                return;

            foreach (var obj in occludedObjects)
                mask.Draw(obj.Shape, new Gray(1), -1);

            if (_depthImage == null) return;

            var occludedPartsImage = new Image<Gray, float>(imageWidth, imageHeight);

            var depthMapBinary = _depthImage.ThresholdBinaryInv(new Gray(255), new Gray(255));
            var depthMap = depthMapBinary;

            if (depthMap.Width != imageWidth || depthMap.Height != imageHeight)
            {
                var resizedDepthMap = new Image<Gray, float>(imageWidth, imageHeight);
                CvInvoke.cvResize(depthMap.Ptr, resizedDepthMap.Ptr, INTER.CV_INTER_CUBIC);
                depthMap.Dispose();
                depthMap = resizedDepthMap;
            }

            CvInvoke.cvCopy(depthMap.Ptr, occludedPartsImage.Ptr, mask);
            occludedPartsImage = occludedPartsImage.Erode(2).Dilate(2);

            var debugImage3 = occludedPartsImage.Convert<Rgb, byte>();

            var fixedImage = image.Or(debugImage3);
            fixedImage = fixedImage.Dilate(2).Erode(2);

            var debugImage = fixedImage.Copy();
            Task.Factory.StartNew(() =>
            {
                var bitmapSource = debugImage.ToBitmapSource(true);
                debugImage.Dispose();
                return bitmapSource;
            }).ContinueWith(t => DebugImageSource = t.Result);

            var outputImageEnclosed = outputImage;
            Parallel.ForEach(occludedObjects, obj => FindObjectByBlankingKnownObjects(true, fixedImage, ref outputImageEnclosed, updateTime, objects, obj));
        }
Developer: AlternateIf, Project: huddle-engine, Lines: 57, Source: RectangleTrackerColor.cs

Example 13: ProcessImage

        public Image<Bgr, byte> ProcessImage(Image<Bgr, byte> img)
        {
            //return ProcessImageHough(img);

            var inputImage = img.Clone();
            _bg.Update(img);
            img = _bg.BackgroundMask.Convert<Bgr, Byte>();
            _a.OnFrameUpdated(img);
            img = img.Erode(1);
            img = img.Dilate(1);
            _b.OnFrameUpdated(img);
            //img.SmoothBlur(3, 3);
            img = FillBlobs(img);
            //DrawBlobs(img);
            _c.OnFrameUpdated(img);

            //use image as mask to display original image
            var temp = inputImage.Sub(img);
            _d.OnFrameUpdated(temp);

            //float[] BlueHist = GetHistogramData(img[0]);

            //Image<Bgr, byte> image = new Image<Bgr, byte>(img.Width, img.Height);

            //for (int i = 0; i < BlueHist.Length; i++)
            //{
            //    image.Draw(new LineSegment2D(new Point(i, (int)BlueHist[i]), new Point(i, 0)), new Bgr(Color.Red), 1);
            //}

            //_e.OnFrameUpdated(image);

            //only display skin
            img = img.Not();
            //img = DetectSkin(img);
            //img = img.Erode(2);
            //img = img.Dilate(2);
            //img = img.Not();

            //DrawHoughLines(img);

            _e.OnFrameUpdated(ProcessImageLinearOptimization(img));
            //img.MorphologyEx()

            //List<Contour<Point>> allContours;
            //var contours = DetectBlobs(img.Convert<Gray, byte>(), out allContours);

            //Image<Bgr, byte> image = new Image<Bgr, byte>(img.Width, img.Height, new Bgr(Color.White));
            //if (allContours != null)
            //{

            //    foreach (Contour<Point> contour in allContours.Take(3))
            //    {
            //        var convexityDefact = contour.GetConvexityDefacts(new MemStorage(), Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);

            //        foreach (MCvConvexityDefect mCvConvexityDefect in convexityDefact)
            //        {
            //            PointF startPoint = new PointF(mCvConvexityDefect.StartPoint.X, mCvConvexityDefect.StartPoint.Y);
            //            CircleF startCircle = new CircleF(startPoint, 5f);
            //            image.Draw(startCircle, new Bgr(Color.Red), 5);
            //        }
            //        Draw(image, contour, false);
            //        //Draw(image, contour, true);
            //    }
            //}

            //_a.OnFrameUpdated(image);

            return img;
        }
Developer: HumanRemote, Project: HumanRemote, Lines: 69, Source: BackgroundSubtractProcessor.cs

Example 14: ProcessFrame

		public void ProcessFrame()
		{
			lock (SyncObject)
			{
				m_OriginalImage = m_Capture.QueryFrame();

				m_ClippedImage = m_OriginalImage.Copy(this.RegionOfInterest);

				// Make the dark portions bigger
				m_ErodedImage = m_ClippedImage.Erode(1);

				//Convert the image to grayscale
				m_GrayImage = m_ErodedImage.Convert<Gray, Byte>(); //.PyrDown().PyrUp();

				m_BlackAndWhiteImage = m_GrayImage.ThresholdBinaryInv(new Gray(m_Threshold), new Gray(255));

				FindBlobsAndDraw(m_BlackAndWhiteImage);
			}
			RaiseChangedEvent();
		}
Developer: eldb2, Project: robotic-tic-tac-toe-lynxmotion, Lines: 20, Source: MainFormModel.cs

Example 15: MainLoop

        private void MainLoop()
        {
            CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
            HandImage = new Image<Gray, byte>(CurrentFrame.Size);
            while (!IsDisposed)
            {
                CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
                HandImage.SetZero();

                //Extract the skin-colored region
                ExtractSkinColor(CurrentFrame, ref HandImage);

                //Noise removal (Erode/Dilate return new images, so the results must be assigned back)
                HandImage = HandImage.Erode(20);
                HandImage = HandImage.Dilate(20);

                imageBox2.Image = HandImage;

                //Extract the hand contour and classify the rock-paper-scissors gesture
                Contour<Point> hand_contour = PickupLargestArea(ref HandImage);
                Hands hand = DecideHandFromDefact(hand_contour);
                string msg = "";
                switch (hand)
                {
                    case Hands.PAPER:
                        msg = "Paper";
                        break;
                    case Hands.ROCK:
                        msg = "Rock";
                        break;
                    case Hands.SCISSORS:
                        msg = "Scissors";
                        break;
                    case Hands.UNKNOWN:
                        msg = "Unknown...";
                        break;
                }

                this.Invoke(new MethodInvoker(delegate() {
                    if (!this.IsDisposed) {
                            textBox_Msg.Text = msg;
                            UpdateParams();
                    }
                }));

                if (hand_contour == null)
                {
                    continue;
                }
                CurrentFrame.DrawPolyline(hand_contour.ToArray(), true, new Hsv(255, 255, 255), 2);
                CurrentFrame.DrawPolyline(hand_contour.GetConvexHull(ORIENTATION.CV_CLOCKWISE).ToArray(), true, new Hsv(50, 100, 50), 1);
                imageBox1.Image = CurrentFrame;

            }
        }
Developer: jgmanz, Project: asial-arbeit, Lines: 55, Source: Form1.cs


Note: The Image.Erode method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright in the source code belongs to the original authors, and distribution and use are subject to the corresponding project's license. Do not reproduce without permission.