C# Image.SmoothMedian Method Code Examples

This article collects typical usage examples of the Image.SmoothMedian method in C#. If you are wondering how to call C# Image.SmoothMedian, what its arguments do, or what a working call looks like, the selected code examples below should help. You can also explore further usage examples of the containing Image class.


Eight code examples of the Image.SmoothMedian method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
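
Before the project-specific examples below, here is a minimal, self-contained sketch of a typical call. It is an illustration rather than code taken from any of the listed projects: it assumes Emgu CV 2.x, and the file names "input.png" and "output.png" are hypothetical placeholders. SmoothMedian expects a positive odd kernel size.

using Emgu.CV;
using Emgu.CV.Structure;

public static class SmoothMedianDemo
{
    public static void Run()
    {
        // Load an 8-bit color image ("input.png" is a placeholder path).
        using (Image<Bgr, byte> input = new Image<Bgr, byte>("input.png"))
        // The kernel size must be a positive odd number (3, 5, 7, ...); larger kernels
        // suppress more noise at the cost of fine detail.
        using (Image<Bgr, byte> smoothed = input.SmoothMedian(5))
        {
            smoothed.Save("output.png");
        }
    }
}

Note that SmoothMedian does not modify the source image in place: it allocates and returns the filtered copy, which is why the examples below either reassign the return value or copy it into an output buffer.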

Example 1: Run

        public cFeedBackMessage Run()
        {
            base.Start();

            if(base.IsFull3DImage)
                base.Output = new cImage(base.Input, false);
            else
                base.Output = new cImage(Input.Width, Input.Height, 1, base.ListChannelsToBeProcessed.Count);

            object _firstValue = base.ListProperties.FindByName("Kernel Size");
            int KernelSize = 0;
            if (_firstValue == null)
            {
                base.GenerateError("Kernel Size not found !");
                return base.FeedBackMessage;
            }
            try
            {
                cProperty TmpProp = (cProperty)_firstValue;
                KernelSize = (int)TmpProp.GetValue();
            }
            catch (Exception)
            {
                base.GenerateError("Kernel Size cast didn't work");
                return base.FeedBackMessage;
            }

            for (int IdxChannel = 0; IdxChannel < base.ListChannelsToBeProcessed.Count; IdxChannel++)
            {
                int CurrentChannel = base.ListChannelsToBeProcessed[IdxChannel];

                Image<Gray, float> inputImage = new Image<Gray, float>(Input.Width, Input.Height);

                if (base.IsFull3DImage)
                {
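                // Full 3-D input is not handled in this excerpt; only the 2-D slice branch below applies the median filter.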

                }
                else
                {
                    for (int j = 0; j < Input.Height; j++)
                        for (int i = 0; i < Input.Width; i++)
                            inputImage.Data[j, i, 0] = Input.SingleChannelImage[CurrentChannel].Data[i + j * Input.Width + base.SliceIndex* Input.SliceSize];

                    Image<Gray, float> ProcessedImage = new Image<Gray, float>(inputImage.Width, inputImage.Height);
                    ProcessedImage = inputImage.SmoothMedian(KernelSize);
                    this.Output.SingleChannelImage[IdxChannel].SetNewDataFromOpenCV(ProcessedImage);
                }

            }

            base.End();
            return FeedBackMessage;
        }
Developer: cyrenaique, Project: HCSA, Lines of code: 53, Source file: cImageFilterMedian.cs
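
A practical note on Example 1: the "Kernel Size" property is passed straight to SmoothMedian, and the underlying OpenCV median filter accepts only odd aperture sizes, so an even value read from the property list would be expected to fail at run time. Validating the value, or rounding it up to the next odd number before the per-channel loop, is a reasonable safeguard.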

Example 2: ProcessColorImage

        public override Image ProcessColorImage(Bitmap frame, ToteDetectionType detectionType, bool detectBins)
        {
            Image<Bgr, Byte> img = new Image<Bgr, Byte>(frame);

            //// Get The Thresh Image With Given Values
            //Image<Gray, byte> thresh = (threshData as BgrThreshData).InRange(img);
            //// Pixelate Image
            //threshData.Blur(ref thresh);
            //
            //
            //Image ret = base.AnalyzeImage(thresh, detectionType, detectBins);


            //frame.Dispose();
            //thresh.Dispose();

            img = img.SmoothMedian(11);
            img = img.SmoothGaussian(11);
            img = img.Erode(15);
            img = img.Dilate(10);

            // Try this: img.HoughCircles();

            Image<Gray, byte> thresh = img.InRange(new Bgr(110, 130, 100), new Bgr(164, 166, 181));
            Contour<Point> countor = thresh.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
            List<Contour<Point>> PlayingBalls = new List<Contour<Point>>();
            while (countor != null)
            {
                // filter contours
                // convex-hull contours
                if (countor.Area > 50)
                    PlayingBalls.Add(countor);
                   
                countor = countor.HNext;
            }
            float resolutionOffset = ((float)thresh.Width * thresh.Height) / (640.0f * 480.0f);


            foreach (Contour<Point> ball in PlayingBalls)
            {
                
                img.Draw(ball, new Bgr(255, 0, 0), (int)Math.Ceiling(3.0f));
                // draw the remaining contours and their min enclosing circles (draw on img)
            }

            Image ret = img.ToBitmap();
            img.Dispose();
            return ret;
        }
Developer: GreenBlitz4590Programmers, Project: StrongHoldVision, Lines of code: 52, Source file: ImageProcessor.cs

Example 3: createBinaryOtsu

        //Create binary image with otsu threshold
        public Bitmap createBinaryOtsu(Bitmap img)
        {
            Image<Bgra, Byte> image = new Image<Bgra, Byte>(img);

            if (Config.dilateBefore)
                image._Dilate(Config.dilate);

            //image._SmoothGaussian(51);
            image = image.SmoothMedian(Config.median);
            //image = image.SmoothBlur(10, 10);

            image._Dilate(Config.dilate);
            //image._Erode(7);

            //Foreground/background separation with Otsu.
            Image<Gray, Byte> greyImage = image.Convert<Gray,Byte>();

            Image<Gray, Byte> otsuImage = new Image<Gray, Byte>(img.Size);

            CvInvoke.cvThreshold(greyImage, otsuImage, 0, 255, Emgu.CV.CvEnum.THRESH.CV_THRESH_OTSU);

            return otsuImage.ToBitmap();
        }
Developer: JulianWolfert, Project: ImageRegistration, Lines of code: 24, Source file: ImageProcessor.cs

Example 4: ProcessDepthFrameData

        private unsafe void ProcessDepthFrameData(IntPtr depthFrameData, int frameSize, ushort minDepth, ushort maxDepth, DepthSpacePoint p, bool rec, bool left)
        {
            ushort* frameData = (ushort*)depthFrameData; // depth frame data is a 16 bit value
            ushort initDepth = frameData[depthFrameDescription.Width * ((int)p.Y) + ((int)p.X)];

            if (rec && (bool)chk_recDepth.IsChecked)
            {
                string file = "";
                //FileCode: [left/right]_[gestureNumber]_[sequence]_[sequenceIndex]
                if (left)   file = String.Format("c:/temp/PCD/pcd/dd_left_{0:00}_{1:00}_{2:00}.pcd", gestureNumber, sequenceID, depthFrameIndexL++);
                else        file = String.Format("c:/temp/PCD/pcd/dd_right_{0:00}_{1:00}_{2:00}.pcd", gestureNumber, sequenceID, depthFrameIndexR++);

                pcdData = new StreamWriter(file, false);
            }

            int distanceFactor = 80;
            int index = 0;
            currentFrame = new byte[windowSize * windowSize];

            for (int y = -frameSize; y < frameSize; y++)
            {
                for (int x = -frameSize; x < frameSize; x++)
                {
                    //Select index for smaller frame and get Depth value
                    int offset = (depthFrameDescription.Width * ((int)p.Y + y) + ((int)p.X + x));
                    ushort depth = frameData[offset];

                    bool isNearPalm = depth < initDepth + distanceFactor && depth > initDepth - distanceFactor;         
                    depth = isNearPalm ? (ushort)(depth + (depth - initDepth) * 10) : (ushort)0;
                    depthPixels[index] = currentFrame[index] = (byte)(depth / MapDepthToByte);
                    index++;

                    //  ==== Record DepthData for nextStep (Segmentation)
                    if ((bool)chk_recDepth.IsChecked && rec)
                    {
                        if (isNearPalm)
                        {
                            var point = Helper.depthToPCD(p.X + (float)x, p.Y + (float)y, depth);
                            pcdData.WriteLine(String.Format("{0} {1} {2}", point.X.ToString().Replace(',', '.'), point.Y.ToString().Replace(',', '.'), point.Z.ToString().Replace(',', '.')));
                            pcdData.Flush();
                        }
                    }
                }
            }

            if ((bool)chk_recDepth.IsChecked && rec)
                pcdData.Close();

            //============== Opt Flow ========
            var thisPreviousFrame = left ? previousFrameL : previousFrameR;
            Image<Gray, byte> prevImg = new Image<Gray, byte>(arrayToBitmap(thisPreviousFrame, frameSize * 2, frameSize * 2));
            Image<Gray, byte> currentImg = new Image<Gray, byte>(arrayToBitmap(currentFrame, frameSize * 2, frameSize * 2));
            Image<Gray, float> flowX = new Image<Gray, float>(new System.Drawing.Size(frameSize * 2, frameSize * 2));
            Image<Gray, float> flowY = new Image<Gray, float>(new System.Drawing.Size(frameSize * 2, frameSize * 2));
            var winSize = new System.Drawing.Size(5, 5);

            try
            {                
                currentImg = currentImg.SmoothMedian(5);
                OpticalFlow.LK(prevImg, currentImg, winSize, flowX, flowY);
                var bytes = (flowX.Convert<Gray, byte>() + flowY.Convert<Gray, byte>()).Bytes;
                var flow = new Image<Gray,byte>(frameSize * 2, frameSize * 2, new Gray (bytes.Sum(e => e) / bytes.Length));

                if (left)
                {
                    previousFrameL = currentFrame;
                    this.flowBitmapLeft.WritePixels(new Int32Rect(0, 0, flow.Bitmap.Width, flow.Bitmap.Height), flow.Bytes, flow.Bitmap.Width, 0);
                }
                else
                {
                    previousFrameR = currentFrame;
                    this.flowBitmapRight.WritePixels(new Int32Rect(0, 0, flow.Bitmap.Width, flow.Bitmap.Height), flow.Bytes, flow.Bitmap.Width, 0);
                }
            }
            catch { Console.WriteLine("Optical Flow Exception"); }
            //============== OF ========
        }
Developer: madingo87, Project: netica, Lines of code: 77, Source file: KinectApp.xaml.cs

Example 5: PollDepth

        }//end PollDepth()
        //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::



        //::::::::::::Remove the noise in a gray image:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
        public Image<Gray, Byte> removeNoise(Image<Gray, Byte> imagenKinet, int sizeWindow)
        {
            Image<Gray, Byte> imagenSinRuido;

            imagenSinRuido = imagenKinet.SmoothMedian(sizeWindow);

            return imagenSinRuido;
        }//endremoveNoise  
Developer: americamm, Project: SystemVersions, Lines of code: 14, Source file: GetKinectData.cs

Example 6: FrameGrabber

        /// <summary>
        /// the main function in this class 
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void FrameGrabber(object sender, EventArgs e)
        {
            sw.Start();
            newImage = grabber.QueryFrame();

            count++;
            if (newImage != null)
            {
                current_image = newImage.Convert<Gray, byte>();
                detector.Process(newImage, tempImage);

                tempImage = tempImage.ThresholdBinary(thresholdValue, MaxValue);
                tempImage = tempImage.Dilate(2);
                tempImage = tempImage.SmoothMedian(3);

                newImageG = current_image.ThresholdBinaryInv(new Gray(threshold), new Gray(255d));
                newImageG = newImageG.And(tempImage);
                newImageG = newImageG.Dilate(1);

                if (numberOfHands > 0)
                {
                    int tt = numberOfHands;
                    for (int i = 0; i < tt; i++)
                    {
                        if (x[i] != null)
                        {
                            try
                            {
                                x[i].StartTracking(elapsed_time);
                            }
                            catch (Exception ex)
                            {
                                Console.WriteLine("lost traking : number  of hands {0} & list x {1}", numberOfHands, x.Count);
                                int id = x[i].id;
                                hand_centers[id] = x[i].new_center_pt;
                                hand_centers.Remove(id);
                                x.RemoveAt(id);
                                --numberOfHands;

                            }
                        }

                    }

                }


                if (numberOfHands < hand_detected)
                {
                    detected_hand = HandDetection(newImageG);
                    if (detected_hand.Any())// any elements in the list
                    {
                        foreach (Contour<Point> h in detected_hand)
                        {
                            if (numberOfHands < hand_detected)
                            {

                                y = new HandTracking(current_image.Width, current_image.Height, hand_centers[numberOfHands]);

                                y.ExtractFeatures(h);
                                y.id = numberOfHands;
                                x.Add(y);

                                numberOfHands++;

                            }
                            else
                                Console.WriteLine("there is already 2 hands");
                        }
                        detected_hand.Clear();

                    }
                }

                sw.Stop();
                elapsed_time = sw.Elapsed.TotalMilliseconds;
           
                sw.Reset();
                imageBoxSkin.Image = newImage;
                imageBoxFrameGrabber.Image = newImageG;
            }
        }
Developer: phylony, Project: handview, Lines of code: 93, Source file: Form1.cs

Example 7: SmoothImage

 public static Image<Bgr, byte> SmoothImage(Image<Bgr, byte> image)
 {
     //median smoothing with a kernel size of 5 works well
     return image.SmoothMedian(5);
 }
Developer: doskir, Project: Drumbot, Lines of code: 5, Source file: CapturedImage.cs

Example 8: SkinDetect

        public Image<Gray, byte> SkinDetect(Image<Bgr, byte> Img)
        {
            Image<Gray, byte> R = new Image<Gray, byte>(Img.Width, Img.Height);
            Image<Gray, byte> G = new Image<Gray, byte>(Img.Width, Img.Height);
            Image<Gray, byte> B = new Image<Gray, byte>(Img.Width, Img.Height);

            CvInvoke.cvSplit(Img, B, G, R, IntPtr.Zero);

            Image<Gray, byte> S = new Image<Gray, byte>(Img.Width, Img.Height);
            Image<Gray, byte> skin = new Image<Gray, byte>(Img.Width, Img.Height);

            /* convert RGB color space to IRgBy color space using this formula:
            http://www.cs.hmc.edu/~fleck/naked-skin.html
            I = L(G)
            Rg = L(R) - L(G)
            By = L(B) - [L(G) +L(R)] / 2

            to calculate the hue:
            hue = atan2(Rg,By) * (180 / 3.141592654f)
            Saturation = sqrt(Rg^2 + By^2)
            */

            for (int j = 0; j < skin.Width; j++)
            {
                for (int i = 0; i < skin.Height; i++)
                {
                    //double I_val = (Math.Log(R[i, j].Intensity) + Math.Log(B[i, j].Intensity) + Math.Log(G[i, j].Intensity)) / 3;
                    //I[i, j] = new Gray(G[i, j].Intensity);

                    double Rg = Math.Log(R[i, j].Intensity) - Math.Log(G[i, j].Intensity);
                    double By = Math.Log(B[i, j].Intensity) - (Math.Log(G[i, j].Intensity) + Math.Log(R[i, j].Intensity)) / 2;

                    double hue_val= Math.Atan2(Rg, By) * (180 / Math.PI);
                    double sat_val = Math.Sqrt(Rg*Rg+ By *By);

                    if (sat_val * 255 >= 20 && sat_val * 255 <= 130 && hue_val >= 110 && hue_val <= 170) //I simplified the naked people filter's two overlapping criteria
                    {
                        S[i, j] = new Gray(255);
                    }
                    else
                    {
                        S[i, j] = new Gray(0);
                    }
                }
            }

            //skin = S.Erode(1);
            skin = S.SmoothMedian(15); // median filter is used so that the image will be kept black and white

            return skin;
        }
Developer: cervecero84, Project: tracking-people, Lines of code: 51, Source file: Form1.cs


Note: The Image.SmoothMedian method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.