

C# Image.Dilate Method Code Examples

This article collects typical usage examples of the C# Image.Dilate method. If you have been wondering what Image.Dilate does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the Image class that the method belongs to.


A total of 13 code examples of the Image.Dilate method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# examples.
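
Before the community examples, a minimal sketch of the method itself may help: in Emgu CV 2.x, Image&lt;TColor, TDepth&gt;.Dilate(int iterations) returns a new image in which bright regions are grown by the given number of passes of the default 3x3 kernel; the source image is not modified in place. The file names and threshold below are illustrative:

        using Emgu.CV;
        using Emgu.CV.Structure;

        class DilateDemo
        {
            static void Main()
            {
                // Load a grayscale image and binarize it (path and threshold are illustrative).
                Image<Gray, byte> src = new Image<Gray, byte>("input.png");
                Image<Gray, byte> mask = src.ThresholdBinary(new Gray(128), new Gray(255));

                // Dilate returns a NEW image grown by 2 passes of the default 3x3 kernel;
                // 'mask' itself is left untouched.
                Image<Gray, byte> grown = mask.Dilate(2);

                grown.Save("dilated.png");
            }
        }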

Example 1: ProcessColorImage

        public override Image ProcessColorImage(Bitmap frame, ToteDetectionType detectionType, bool detectBins)
        {
            Image<Bgr, Byte> img = new Image<Bgr, Byte>(frame);

            //// Get the thresh image with given values
            //Image<Gray, byte> thresh = (threshData as BgrThreshData).InRange(img);
            //// Pixelate image
            //threshData.Blur(ref thresh);
            //Image ret = base.AnalyzeImage(thresh, detectionType, detectBins);
            //frame.Dispose();
            //thresh.Dispose();

            img = img.SmoothMedian(11);
            img = img.SmoothGaussian(11);
            img = img.Erode(15);
            img = img.Dilate(10);

            // Try this: img.HoughCircles();

            Image<Gray, byte> thresh = img.InRange(new Bgr(110, 130, 100), new Bgr(164, 166, 181));
            Contour<Point> contour = thresh.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
            List<Contour<Point>> playingBalls = new List<Contour<Point>>();
            while (contour != null)
            {
                // filter contours by area (convex-hull filtering could be added here)
                if (contour.Area > 50)
                    playingBalls.Add(contour);

                contour = contour.HNext;
            }
            float resolutionOffset = ((float)thresh.Width * thresh.Height) / (640.0f * 480.0f);


            foreach (Contour<Point> ball in playingBalls)
            {
                img.Draw(ball, new Bgr(255, 0, 0), 3);
                // draw remaining contours and their min enclosing circles (on img)
            }

            Image ret = img.ToBitmap();
            img.Dispose();
            return ret;
        }
Developer: GreenBlitz4590Programmers | Project: StrongHoldVision | Lines: 52 | Source: ImageProcessor.cs
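
The drawing loop above leaves its min-enclosing-circle step as a comment. A minimal sketch of one way to finish it, assuming Emgu CV 2.x's PointCollection.MinEnclosingCircle helper (colors are illustrative):

        // Hypothetical completion of the min-enclosing-circle step in Example 1.
        foreach (Contour<Point> ball in playingBalls)
        {
            // Convert the contour's integer points to PointF for the helper.
            PointF[] points = Array.ConvertAll(ball.ToArray(), p => new PointF(p.X, p.Y));

            // Smallest circle enclosing every contour point.
            CircleF circle = Emgu.CV.PointCollection.MinEnclosingCircle(points);

            img.Draw(ball, new Bgr(255, 0, 0), 3);   // contour outline
            img.Draw(circle, new Bgr(0, 255, 0), 2); // enclosing circle
        }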

Example 2: Run

        public void Run()
        {
            base.Output = new cImage(Input.Width, Input.Height, Input.Depth, base.ListChannelsToBeProcessed.Count);
            for (int IdxChannel = 0; IdxChannel < base.ListChannelsToBeProcessed.Count; IdxChannel++)
            {
                int CurrentChannel = base.ListChannelsToBeProcessed[IdxChannel];

                Image<Gray, float> inputImage = new Image<Gray, float>(Input.Width, Input.Height);

                for (int j = 0; j < Input.Height; j++)
                    for (int i = 0; i < Input.Width; i++)
                        inputImage.Data[j, i, 0] = Input.SingleChannelImage[CurrentChannel].Data[i + j * Input.Width];

                // Dilate returns a new image, so no pre-allocated buffer is needed
                Image<Gray, float> ProcessedImage = inputImage.Dilate(this.Iterations);
                this.Output.SingleChannelImage[IdxChannel].SetNewDataFromOpenCV(ProcessedImage);
            }

            return;
        }
Developer: cyrenaique | Project: HCSA | Lines: 21 | Source: cImageMorphoDilate.cs

Example 3: CreateConfidenceMask

        /// <summary>
        /// Creates a mask of the confidence image with a high confidence value. The mask is later used to merge
        /// the alternating low / high confidence image.
        /// </summary>
        /// <param name="confidenceImage"></param>
        /// <returns></returns>
        private Image<Gray, byte> CreateConfidenceMask(Image<Rgb, byte> confidenceImage)
        {
            var floodFillImage = confidenceImage.Convert<Gray, byte>();

            // TODO Not necessarily required to extract spot with valid depth values in low confidence depth image (high).
            CvInvoke.cvThreshold(floodFillImage.Ptr, floodFillImage.Ptr, 10, 250, THRESH.CV_THRESH_BINARY);

            // Mask needs to be two pixels bigger than the source image.
            var width = confidenceImage.Width();
            var height = confidenceImage.Height();

            var shrinkMaskROI = new Rectangle(1, 1, width, height);
            var mask = new Image<Gray, byte>(width + 2, height + 2);

            var seedPoint = new Point(width / 2, height / 2);

            MCvConnectedComp comp;

            // Flood fill segment with lowest pixel value to allow for next segment on next iteration.
            CvInvoke.cvFloodFill(floodFillImage.Ptr,
                seedPoint,
                new MCvScalar(255.0),
                new MCvScalar(10),
                new MCvScalar(10),
                out comp,
                CONNECTIVITY.EIGHT_CONNECTED,
                FLOODFILL_FLAG.DEFAULT,
                mask.Ptr);

            mask = mask.Dilate(6).Erode(15);
            mask.ROI = shrinkMaskROI;

            mask = mask.Mul(255);
            var maskCopy = mask.Copy();
            Task.Factory.StartNew(() =>
            {
                var bitmap = maskCopy.ToBitmapSource(true);
                maskCopy.Dispose();
                return bitmap;
            }).ContinueWith(s => AdaptiveSensingMaskImageSource = s.Result);

            return mask;
        }
Developer: AlternateIf | Project: huddle-engine | Lines: 49 | Source: Senz3DIntel.cs

Example 4: Filter

        private void Filter()
        {
            // Create thresholds
            Hsv threshold_lower = new Hsv(Color_spot.Hue - 25, 100, 100);
            Hsv threshold_higher = new Hsv(Color_spot.Hue + 25, 240, 240);

            // Blur image and find colors between thresholds
            Image_filtered = Image_transformed.Convert<Hsv, Byte>().SmoothBlur(20, 20).InRange(threshold_lower, threshold_higher);

            // Increase size of the spot and remove possible hole where color was too bright
            Image_filtered = Image_filtered.Dilate(5);

            // Decrease size again a little, makes it smoother
            Image_filtered = Image_filtered.Erode(3);
        }
Developer: Ben-Kaniobi | Project: Laserboard | Lines: 15 | Source: Form1.cs

Example 5: ApplyEffect

		private BitmapSource ApplyEffect(EffectType effect, byte[] pixelData, System.Drawing.Bitmap bitmap, Image<Bgra, byte> ocvImage, List<Rectangle> effectRegions)
		{

			// lock the bitmap for writing
			BitmapData data = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height),
												ImageLockMode.WriteOnly, bitmap.PixelFormat);

			// copy the data from pixelData to BitmapData
			Marshal.Copy(pixelData, 0, data.Scan0, pixelData.Length);

			// unlock the bitmap
			bitmap.UnlockBits(data);

			// assign the bitmap to the OpenCV image
			ocvImage.Bitmap = bitmap;

			if(effect != EffectType.None)
			{
				foreach(Rectangle effectRegion in effectRegions)
				{
					// set the Region of Interest based on the joint
					ocvImage.ROI = effectRegion;

					// temp image to hold effect output
					Image<Bgra, byte> ocvTempImg;

					switch(effect)
					{
						case EffectType.Blur:
							ocvTempImg = ocvImage.SmoothBlur(20, 20);
							break;
						case EffectType.Dilate:
							ocvTempImg = ocvImage.Dilate(5);
							break;
						case EffectType.Erode:
							ocvTempImg = ocvImage.Erode(5);
							break;
						case EffectType.Edge:
							Image<Gray, byte> gray = ocvImage.Convert<Gray, byte>();
							gray = gray.SmoothBlur(3, 3);
							gray = gray.Canny(30.0f, 50.0f);
							ocvTempImg = gray.Convert<Bgra, byte>();
							break;
						default:
							throw new ArgumentOutOfRangeException("effect");
					}

					// copy the effect area to the final image
					CvInvoke.cvCopy(ocvTempImg, ocvImage, IntPtr.Zero);
				}
			}

			// reset the Region of Interest
			ocvImage.ROI = Rectangle.Empty;

            #region Convert System.Drawing.Bitmap to WPF BitmapSource
            // get a bitmap handle from the OpenCV image
            IntPtr hBitmap = ocvImage.ToBitmap().GetHbitmap();

            // convert that handle to a WPF BitmapSource
            BitmapSource bitmapSource = Imaging.CreateBitmapSourceFromHBitmap(hBitmap, IntPtr.Zero, Int32Rect.Empty,
                                                                                BitmapSizeOptions.FromWidthAndHeight(
                                                                                bitmap.Width, bitmap.Height));
            // delete the bitmap
            DeleteObject(hBitmap); 
            #endregion

			return bitmapSource;
		}
Developer: Cocotus | Project: kinect | Lines: 69 | Source: MainWindow.xaml.cs

Example 6: FrameGrabber

        /// <summary>
        /// The main function in this class: grabs a frame, updates hand tracking, and refreshes the image boxes.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void FrameGrabber(object sender, EventArgs e)
        {
            sw.Start();
            newImage = grabber.QueryFrame();

            count++;
            if (newImage != null)
            {
                current_image = newImage.Convert<Gray, byte>();
                detector.Process(newImage, tempImage);

                tempImage = tempImage.ThresholdBinary(thresholdValue, MaxValue);
                tempImage = tempImage.Dilate(2);
                tempImage = tempImage.SmoothMedian(3);

                newImageG = current_image.ThresholdBinaryInv(new Gray(threshold), new Gray(255d));
                newImageG = newImageG.And(tempImage);
                newImageG = newImageG.Dilate(1);

                if (numberOfHands > 0)
                {
                    int tt = numberOfHands;
                    for (int i = 0; i < tt; i++)
                    {
                        if (x[i] != null)
                        {
                            try
                            {
                                x[i].StartTracking(elapsed_time);
                            }
                            catch (Exception ex)
                            {
                                Console.WriteLine("lost tracking: number of hands {0} & list x {1}", numberOfHands, x.Count);
                                int id = x[i].id;
                                hand_centers[id] = x[i].new_center_pt;
                                hand_centers.Remove(id);
                                x.RemoveAt(id);
                                --numberOfHands;

                            }
                        }

                    }

                }


                if (numberOfHands < hand_detected)
                {
                    detected_hand = HandDetection(newImageG);
                    if (detected_hand.Any())// any elements in the list
                    {
                        foreach (Contour<Point> h in detected_hand)
                        {
                            if (numberOfHands < hand_detected)
                            {

                                y = new HandTracking(current_image.Width, current_image.Height, hand_centers[numberOfHands]);

                                y.ExtractFeatures(h);
                                y.id = numberOfHands;
                                x.Add(y);

                                numberOfHands++;

                            }
                            else
                                Console.WriteLine("there are already 2 hands");
                        }
                        detected_hand.Clear();

                    }
                }

                sw.Stop();
                elapsed_time = sw.Elapsed.TotalMilliseconds;
                sw.Reset();
                imageBoxSkin.Image = newImage;
                imageBoxFrameGrabber.Image = newImageG;
            }
        }
Developer: phylony | Project: handview | Lines: 93 | Source: Form1.cs

Example 7: RefreshWindow


//......... part of the code omitted here .........
                }
                feret = (double)ftxMax / ftyMax; */
                observed[4,i] = (double)ftxMax / (Y-y);//feret
                observed[1,i] = (double)(blob[i].Area) / Math.Sqrt(2 * Math.PI * blairsum);//blair

                gestChance[GEST.SLAYER] = dist(slayer, i);
                gestChance[GEST.THUMBLEFT] = dist(thumbleft, i);
                gestChance[GEST.THUMBUP] = dist(thumbup, i);
                gestChance[GEST.SHAKA] = dist(shaka, i);
                gestChance[GEST.FIST] = dist(fist, i);
                gestChance[GEST.VICTORY] = dist(victory, i);
                gestChance[GEST.VOPEN] = dist(vopen, i);
                gestChance[GEST.HOPEN] = dist(hopen, i);
                gestChance[GEST.FINGERS] = dist(fingers, i);
                gestChance[GEST.SCISSORS] = dist(scissors, i);

                //list fold - get key of minimal value
                KeyValuePair<GEST,double> elem = gestChance.Aggregate((l, r) => l.Value < r.Value ? l : r);
                found[i] = (elem.Value < TOLERANCE) ? elem.Key : GEST.BLANK;
                if (elem.Key == GEST.FIST && (double)(X-x)/(Y-y) < .6) {
                    found[i] = GEST.VOPEN;
                }
                gestureLabel[i] = labels[(int)found[i]];
            }

                g1value.Text = gestureLabel[1];
                g2value.Text = gestureLabel[0];

                compactnessLbl.Text = observed[0, 1].ToString(format);
                blairLbl.Text = observed[1, 1].ToString(format);
                malinowskaLbl.Text = observed[2, 1].ToString(format);
                malzmodLbl.Text = observed[3, 1].ToString(format);
                feretLbl.Text = observed[4, 1].ToString(format);

                comp2.Text = observed[0, 0].ToString(format);
                blair2.Text = observed[1, 0].ToString(format);
                mal2.Text = observed[2, 0].ToString(format);
                malz2.Text = observed[3, 0].ToString(format);
                feret2.Text = observed[4, 0].ToString(format);

            /* for blobs not detected */
            for (; i < 2; ++i) {
                observed[0, i] = observed[1, i] = observed[2, i] = observed[3, i] = observed[4, i] = NOT_FOUND;
            }

            imageGray = new Image<Gray, Byte>(bmp);
            imageGray = imageGray.Erode((int)nudErode.Value);
            imageGray = imageGray.Dilate((int)nudDilate.Value);
            imageBox2.Image = imageGray;

            // Move the mouse pointer based on the center of gravity of the left hand
            if (centerOfGravityLHandX * centerOfGravityLHandY != 0 && !blockMouseControl)
            {
                double smoothness = (double)nudSmoothness.Value;
                double sensitivity = (double)nudSensitivity.Value;
                int newPositionX = screenWidth - (int)(centerOfGravityLHandX / (imageGray.Width * .2) * sensitivity * screenWidth); //- imageGray.Width*1/5
                int newPositionY = (int)((centerOfGravityLHandY - imageGray.Height * .5) / (imageGray.Height * .25) * sensitivity * screenHeight);

                int diffX = Cursor.Position.X + newPositionX;
                int diffY = Cursor.Position.Y - newPositionY;

                newPositionX = Cursor.Position.X - (int)(diffX / smoothness);
                newPositionY = Cursor.Position.Y - (int)(diffY / smoothness);
                MouseSimulating.SetMousePosition(newPositionX, newPositionY);

                // Determine which action to take
                if (found[1] == GEST.BLANK || prevGestureLeft != found[1]) {
                    frameCounterLeft = 0;
                    prevGestureLeft = found[1];
                }

                if (found[0] == GEST.BLANK || prevGestureRight != found[0]) {
                    frameCounterRight = 0;
                    prevGestureRight = found[0];
                }

                if (frameCounterLeft == 30) // number of frames that must pass - at 30 fps
                {
                    if (prevGestureLeft == GEST.FIST) MouseSimulating.PressLPM();
                    else if (prevGestureLeft == GEST.VOPEN) MouseSimulating.ReleaseLPM();
                    frameCounterLeft = 0;
                } else frameCounterLeft++;

                if (frameCounterRight == 30) {
                    if (prevGestureRight == GEST.FIST) MouseSimulating.ClickLPM();
                    else if (prevGestureRight == GEST.SLAYER) MouseSimulating.ScrollUP(200);
                    else if (prevGestureRight == GEST.VICTORY) MouseSimulating.ScrollDOWN(200);
                    else if (prevGestureRight == GEST.FINGERS) MouseSimulating.ClickPPM();
                    else if (prevGestureRight == GEST.THUMBUP) KeyboardSimulating.SendCtrlC();
                    else if (prevGestureRight == GEST.THUMBLEFT) KeyboardSimulating.SendCtrlV();
                    else if (prevGestureRight == GEST.SCISSORS) KeyboardSimulating.SendCtrlX();
                    else if (prevGestureRight == GEST.HOPEN) { MouseSimulating.ClickLPM(); MouseSimulating.ClickLPM(); }
                    else if (prevGestureRight == GEST.SHAKA) MouseSimulating.ClickMouseButton4();

                    frameCounterRight = 0;
                }
                else frameCounterRight++;

            }
        }
Developer: rampler | Project: KameraMyszkaEmguCV | Lines: 101 | Source: KameraMyszka.cs

Example 8: ProcessAndView

        public override Image<Rgb, byte> ProcessAndView(Image<Rgb, byte> image)
        {
            image = IsFirstErodeThenDilate ? image.Erode(NumErode).Dilate(NumDilate) : image.Dilate(NumDilate).Erode(NumErode);

            return image;
        }
Developer: AlternateIf | Project: huddle-engine | Lines: 6 | Source: ErodeDilate.cs
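
For context: erode-then-dilate is morphological opening (it removes small specks), while dilate-then-erode is closing (it fills small holes). The same toggle can also be expressed with Emgu CV 2.x's MorphologyEx, as a commented-out call in Example 12 below hints; a sketch assuming a 3x3 structuring element and a single iteration:

        // Sketch: opening/closing via MorphologyEx instead of chained Erode/Dilate.
        private static readonly StructuringElementEx Kernel3x3 =
            new StructuringElementEx(new int[,] { { 1, 1, 1 }, { 1, 1, 1 }, { 1, 1, 1 } }, 1, 1);

        public static Image<Rgb, byte> OpenOrClose(Image<Rgb, byte> image, bool erodeFirst)
        {
            var op = erodeFirst
                ? Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_OPEN   // erode, then dilate
                : Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE; // dilate, then erode
            return image.MorphologyEx(Kernel3x3, op, 1);
        }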

Example 9: FindGoalRectangles

        private static IEnumerable<Rectangle> FindGoalRectangles(Image<Gray, byte> gray)
        {
            gray = gray.Dilate(3).Erode(6).Dilate(3);

            var contours = gray.Canny(new Gray(100), new Gray(100)).FindContours();

            var rectangles = new List<Rectangle>();
            if (contours != null)
                do
                {
                    if (contours.BoundingRectangle.Width > 15)
                        rectangles.Add(contours.BoundingRectangle);

                    //frameBgr.Draw(contours.BoundingRectangle, new Bgr(Color.Firebrick), 1);
                    //contours.ApproxPoly(contours.Perimeter*0.5, storage);
                    //if (contours.Area < 50) continue;
                    //frame.DrawPolyline(contours.ToArray(), true, new Gray(200), 1);
                } while ((contours = contours.HNext) != null);

            return rectangles;
        }
Developer: martikaljuve | Project: Robin | Lines: 21 | Source: VisionExperiments.cs

Example 10: ProcessImage

        public Image<Bgr, byte> ProcessImage(Image<Bgr, byte> img)
        {
            //return ProcessImageHough(img);

            var inputImage = img.Clone();
            _bg.Update(img);
            img = _bg.BackgroundMask.Convert<Bgr, Byte>();
            _a.OnFrameUpdated(img);
            img = img.Erode(1);
            img = img.Dilate(1);
            _b.OnFrameUpdated(img);
            //img.SmoothBlur(3, 3);
            img = FillBlobs(img);
            //DrawBlobs(img);
            _c.OnFrameUpdated(img);

            //use image as mask to display original image
            var temp = inputImage.Sub(img);
            _d.OnFrameUpdated(temp);

            //float[] BlueHist = GetHistogramData(img[0]);

            //Image<Bgr, byte> image = new Image<Bgr, byte>(img.Width, img.Height);

            //for (int i = 0; i < BlueHist.Length; i++)
            //{
            //    image.Draw(new LineSegment2D(new Point(i, (int)BlueHist[i]), new Point(i, 0)), new Bgr(Color.Red), 1);
            //}

            //_e.OnFrameUpdated(image);

            //only display skin
            img = img.Not();
            //img = DetectSkin(img);
            //img = img.Erode(2);
            //img = img.Dilate(2);
            //img = img.Not();

            //DrawHoughLines(img);

            _e.OnFrameUpdated(ProcessImageLinearOptimization(img));
            //img.MorphologyEx()

            //List<Contour<Point>> allContours;
            //var contours = DetectBlobs(img.Convert<Gray, byte>(), out allContours);

            //Image<Bgr, byte> image = new Image<Bgr, byte>(img.Width, img.Height, new Bgr(Color.White));
            //if (allContours != null)
            //{

            //    foreach (Contour<Point> contour in allContours.Take(3))
            //    {
            //        var convexityDefact = contour.GetConvexityDefacts(new MemStorage(), Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);

            //        foreach (MCvConvexityDefect mCvConvexityDefect in convexityDefact)
            //        {
            //            PointF startPoint = new PointF(mCvConvexityDefect.StartPoint.X, mCvConvexityDefect.StartPoint.Y);
            //            CircleF startCircle = new CircleF(startPoint, 5f);
            //            image.Draw(startCircle, new Bgr(Color.Red), 5);
            //        }
            //        Draw(image, contour, false);
            //        //Draw(image, contour, true);
            //    }
            //}

            //_a.OnFrameUpdated(image);

            return img;
        }
Developer: HumanRemote | Project: HumanRemote | Lines: 69 | Source: BackgroundSubtractProcessor.cs

Example 11: MainLoop

        private void MainLoop()
        {
            CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
            HandImage = new Image<Gray, byte>(CurrentFrame.Size);
            while (!IsDisposed)
            {
                CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
                HandImage.SetZero();

                // Extract the skin-colored region
                ExtractSkinColor(CurrentFrame, ref HandImage);

                // Noise removal. Erode/Dilate return new images, so the results must be
                // assigned (the original called them without assignment, which had no effect).
                HandImage = HandImage.Erode(20);
                HandImage = HandImage.Dilate(20);

                imageBox2.Image = HandImage;

                // Extract the hand contour and determine the rock-paper-scissors gesture
                Contour<Point> hand_contour = PickupLargestArea(ref HandImage);
                Hands hand = DecideHandFromDefact(hand_contour);
                string msg = "";
                switch (hand)
                {
                    case Hands.PAPER:
                        msg = "パー";       // "paper"
                        break;
                    case Hands.ROCK:
                        msg = "グー";       // "rock"
                        break;
                    case Hands.SCISSORS:
                        msg = "チョキ";     // "scissors"
                        break;
                    case Hands.UNKNOWN:
                        msg = "不明。。。"; // "unknown..."
                        break;
                }

                this.Invoke(new MethodInvoker(delegate() {
                    if (!this.IsDisposed) {
                        textBox_Msg.Text = msg;
                        UpdateParams();
                    }
                }));

                if (hand_contour == null)
                {
                    continue;
                }
                CurrentFrame.DrawPolyline(hand_contour.ToArray(), true, new Hsv(255, 255, 255), 2);
                CurrentFrame.DrawPolyline(hand_contour.GetConvexHull(ORIENTATION.CV_CLOCKWISE).ToArray(), true, new Hsv(50, 100, 50), 1);
                imageBox1.Image = CurrentFrame;

            }
        }
Developer: jgmanz | Project: asial-arbeit | Lines: 55 | Source: Form1.cs
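
A pitfall worth flagging in Example 11 (already fixed in the listing above): in Emgu CV 2.x, Erode and Dilate return new images rather than mutating the receiver, so discarding the return value silently does nothing. A minimal illustration:

        // Pitfall: Erode/Dilate do not modify the image in place.
        Image<Gray, byte> mask = new Image<Gray, byte>(320, 240);
        mask.Erode(3);        // return value discarded -- mask is unchanged
        mask = mask.Erode(3); // correct: keep the returned image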

Example 12: MatchIteration


//......... part of the code omitted here .........
            Debug("distance_sc:{0}", distance_sc);

            #endregion

            #region Image warping and interpolation
            timer.Restart();
            //[x,y]=meshgrid(1:N2,1:N1);
            //x=x(:);y=y(:);
            Matrix x = null, y = null;
            MatrixUtils.CreateGrid(N1, N2, out x, out y);
            //int M = N1 * N2; // M=length(x);
            d2 = Dist2(X3b, MatrixUtils.RankHorizon(x, y));//d2=dist2(X3b,[x y]);
            U = d2.PointMultiply(d2.Each(v => Math.Log(v + Epsilon)));
            //Transformation(MatrixUtils.RankHorizon(x, y), U, axt, wxt, ayt, wyt, out fx, out fy);
            var fxy = Transformation(MatrixUtils.RankHorizon(x, y), U, axt, wxt, ayt, wyt);

            //disp('computing warped image...')
            //V1w=griddata(reshape(fx,N1,N2),reshape(fy,N1,N2),V1,reshape(x,N1,N2),reshape(y,N1,N2));
            Matrix V1w = Interpolation(
                fxy.GetSubMatrix(0, fxy.Rows, 0, 1).Reshape(N1, N2),
                fxy.GetSubMatrix(0, fxy.Rows, 1, 1).Reshape(N1, N2),
                V1
            );

            #region This crude interpolation can leave cracks in the image; try to patch them with a closing operation
            Image<Gray, Byte> img = new Image<Gray, byte>(N2, N1);
            for (int i = 0; i < N2; ++i) {
                for (int j = 0; j < N1; ++j) {
                    img[i, j] = new Gray(V1w[i, j] * 255);
                }
            }
            var see = new StructuringElementEx(new int[,] { { 1, 1, 1 }, { 1, 1, 1 }, { 1, 1, 1 } }, 1, 1);
            //img = img.MorphologyEx(see, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, 1);
            img = img.Dilate(1).Erode(1);
            for (int i = 0; i < N2; ++i) {
                for (int j = 0; j < N1; ++j) {
                    V1w[i, j] = img[i, j].Intensity / 255;
                }
            }
            img.Dispose();
            #endregion
            timeused += timer.StopAndSay("图像变换和插值");
            #endregion

            //fz=find(isnan(V1w));
            //V1w(fz)=0;
            var ssd = (V2 - V1w).Each(v => v * v);//ssd=(V2-V1w).^2;  %%%%%% SSD is computed here
            var ssd_global = ssd.SumAll();//ssd_global=sum(ssd(:));
            Debug("ssd_global:{0}", ssd_global);

            #region figure 5
            if (display_flag) {
                //   figure(5)
                //   subplot(2,2,1)
                //   im(V1)
                //   subplot(2,2,2)
                //   im(V2)
                //   subplot(2,2,4)
                //   im(V1w)
                //   title('V1 after warping')
                //   subplot(2,2,3)
                //   im(V2-V1w)
                //   h=title(['SSD=' num2str(ssd_global)]);
                //   colormap(cmap)
            }
            #endregion
Developer: pakerliu | Project: sharp-context | Lines: 67 | Source: MatchImage.cs

Example 13: SkinLikelihood

        private void SkinLikelihood(object sender, EventArgs e)
        {
            // Get the current frame from the camera - color and gray
            Image<Bgr, Byte> originalFrame = _capture.QueryFrame();

            // This usually occurs when using a video file - after the last frame is read
            // the next frame is null
            if (originalFrame == null)
            {
                // Reset the camera since no frame was captured - for videos, restart the video playback
                ResetCamera();
                originalFrame = _capture.QueryFrame();
            }

            #region Covariance Matrix

            double CbMean = 0.386090697709818;
            double CrMean = 0.606079492993334;

            Matrix<double> E = new Matrix<double>(2, 1);
            E[0, 0] = CbMean; E[1, 0] = CrMean;

            //covariance matrix taken from matlab skin detection demo mdl
            double K1 = 4662.55882477405;
            double K2 = 4050.89761683218;
            double K3 = 4050.89761683218;
            double K4 = 5961.62013605372;

            /*double K1 = 1832.85009482496;
            double K2 = 2250.67197529579;
            double K3 = 2250.67197529579;
            double K4 = 6865.825444635298;*/

            Matrix<double> K = new Matrix<double>(2, 2);
            K[0, 0] = K1;
            K[1, 0] = K2;
            K[0, 1] = K3;
            K[1, 1] = K4; // fixed: the original assigned K4 to K[0,0], overwriting K1 and leaving K[1,1] unset

            #endregion Covariance Matrix

            //capture image
            Image<Bgr, Byte> image = originalFrame.Resize(_frameWidth, _frameHeight);
            capturedImageBox.Image = image;

            //Image<Bgr, Byte> smoothImage = new Image<Bgr, byte>(_frameWidth, _frameHeight);
            //CvInvoke.cvSmooth(image, smoothImage, SMOOTH_TYPE.CV_BILATERAL, 7, 7, 0.5, 0.5);

            //convert to YCbCr colourspace
            Image<Ycc, Byte> yccImage = new Image<Ycc, byte>(_frameWidth, _frameHeight);
            CvInvoke.cvCvtColor(image, yccImage, COLOR_CONVERSION.CV_BGR2YCrCb);

            Image<Gray, Byte> yccBlob = new Image<Gray, Byte>(_frameWidth, _frameHeight);

            //Image<Gray, Byte>[] channels = yccImage.Split();
            //Image<Gray, Double> Cr = channels[1].Convert<Gray, Double>();
            //Image<Gray, Double> Cb = channels[2].Convert<Gray, Double>();
            //Matrix<Double> x =new Matrix<double>(2,1);

            //calculation of the likelihood of pixel being skin
            for (int j = 0; j < yccImage.Width; j++)
            {
                for (int i = 0; i < yccImage.Height; i++)
                {
                    double Cb = yccImage[i, j].Cb / 255.0;
                    double Cr = yccImage[i, j].Cr / 255.0;
                    Cb -= CbMean;
                    Cr -= CrMean;
                    //x[0,0]= Cb[i,j].Intensity/255;
                    //x[1,0]= Cr[i,j].Intensity/255;
                    //double dist = CvInvoke.cvMahalanobis(x, E, K);
                    double CbDist = Cb * (K1 * Cb + K3 * Cr);
                    double CrDist = Cr * (K2 * Cb + K4 * Cr);
                    double dist = CbDist + CrDist;
                    yccBlob[i, j] = new Gray(dist);
                }
            }

            //display likelihood of skin in grayImageBox
            grayImageBox.Image = yccBlob;

            Image<Gray, Byte> dilated = yccBlob.Dilate(1);

            //inverse thresholding the likelihood to get a binary image
            Image<Gray, Byte> thresholded = dilated.ThresholdBinaryInv(new Gray(dilated.GetAverage().Intensity*0.25),new Gray(255));
            //Double minVal, maxVal;
            //Point minLoc, maxLoc;

            // Perform erosion to remove camera noise
            Image<Gray, Byte> eroded = new Image<Gray, Byte>(_frameWidth, _frameHeight);
            CvInvoke.cvErode(thresholded, eroded, IntPtr.Zero, 2);

            motionImageBox.Image = eroded;
        }
Developer: cervecero84 | Project: tracking-people | Lines: 94 | Source: Form1.cs
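
For reference, the per-pixel score in Example 13 is the quadratic form d = (x - mu)^T K (x - mu) with x = (Cb, Cr) / 255. For a true Mahalanobis distance, K must be the inverse covariance matrix; the commented-out cvMahalanobis call suggests that is the intent, though the constants are only described as taken from the MATLAB skin detection demo. A sketch with illustrative names:

        // Sketch: the skin score as an explicit 2x2 quadratic form.
        // 'mean' holds (CbMean, CrMean); 'k' holds K1..K4 laid out as [[K1, K3], [K2, K4]].
        static double SkinScore(double cb, double cr, double[] mean, double[,] k)
        {
            double x0 = cb / 255.0 - mean[0]; // centered Cb
            double x1 = cr / 255.0 - mean[1]; // centered Cr

            // d = x^T K x -- expands to the CbDist + CrDist expression above.
            return x0 * (k[0, 0] * x0 + k[0, 1] * x1)
                 + x1 * (k[1, 0] * x0 + k[1, 1] * x1);
        }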


Note: The Image.Dilate method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors; please consult each project's license before redistributing or reusing the code. Do not repost without permission.