

C# Image.SetZero Method Code Examples

This article collects typical usage examples of the C# Image.SetZero method (in these snippets, Image refers to Emgu CV's Image&lt;TColor, TDepth&gt; class). If you are struggling with questions such as: What exactly does Image.SetZero do? How is Image.SetZero called? Where can I find working examples of Image.SetZero? then the hand-picked code examples below may help. You can also explore further usage examples of the Image class to which this method belongs.


A total of 13 Image.SetZero code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
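As a quick orientation before the examples: the following is a minimal sketch, assuming Emgu CV's Image&lt;TColor, TDepth&gt; class (the class all of the snippets below are built on). SetZero() clears every pixel of every channel to zero, which is why the examples use it to prepare blank masks and to reset frame buffers before drawing or copying. The image size, circle geometry, and the class name SetZeroSketch are illustration values only.

        using System.Drawing;
        using Emgu.CV;
        using Emgu.CV.Structure;

        class SetZeroSketch
        {
            static void Main()
            {
                // Allocate a single-channel 8-bit image.
                Image<Gray, byte> mask = new Image<Gray, byte>(640, 480);

                // SetZero() sets every pixel (all channels) to 0, yielding a blank black image/mask.
                mask.SetZero();

                // Typical follow-up: draw the region of interest onto the cleared mask
                // (a non-positive thickness fills the circle, as in Example 3 below).
                mask.Draw(new CircleF(new PointF(320f, 240f), 50f), new Gray(255), 0);
            }
        }

The same clear-then-draw or clear-then-copy pattern appears in essentially every example that follows.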

Example 1: binaryNiBlack

        //::::::::::::::::: end of variables ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::


        //:::::::::::::Method to binarize the image::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
        //The binarization is inspired by Niblack binarization; the threshold uses the local mean and standard deviation of a sliding window.
        //openinOperation() removes noise from the binarized image using a morphological opening operation.

        private Image<Gray, Byte> binaryNiBlack(Image<Gray, Byte> handFrame)
        {
            int widthFrame = handFrame.Width;
            int heigthFrame = handFrame.Height;

            int sizeSW = 4;
            int sizeSW_w = sizeSW; //Width of the sliding window 
            int sizeSW_h = sizeSW; //Height of the sliding window 
            int halfWidth = (int)(Math.Floor((double)sizeSW / 2));
            int halfHeigth = (int)(Math.Floor((double)sizeSW / 2));
            int binaryWidth = widthFrame + halfWidth * 2;
            int binaryHeigth = heigthFrame + halfHeigth * 2;
            double k = .6;

            Image<Gray, Byte> binaryFrameCalculation = new Image<Gray, Byte>(binaryWidth, binaryHeigth);
            binaryFrameCalculation.SetZero();
            Rectangle roiHand = new Rectangle(halfWidth, halfHeigth, widthFrame, heigthFrame);
            binaryFrameCalculation.ROI = roiHand;
            handFrame.CopyTo(binaryFrameCalculation);
            binaryFrameCalculation.ROI = Rectangle.Empty;

            byte[, ,] byteData = handFrame.Data;

            for (int i = halfHeigth; i < heigthFrame + halfHeigth; i++)
            {
                for (int j = halfWidth; j < widthFrame + halfWidth; j++)
                {
                    Gray media;
                    MCvScalar desest;
                    MCvScalar mediaValue;
                    double threshold;
                    MCvBox2D roi;

                    Image<Gray, Byte> imageCalculate = new Image<Gray, Byte>(sizeSW_w, sizeSW_h);
                    roi = new MCvBox2D(new System.Drawing.Point(j, i), new System.Drawing.Size(sizeSW_w, sizeSW_h), 0);

                    imageCalculate = binaryFrameCalculation.Copy(roi);
                    binaryFrameCalculation.ROI = Rectangle.Empty;
                    imageCalculate.AvgSdv(out media, out desest);
                    mediaValue = media.MCvScalar;
                    threshold = mediaValue.v0 + (k * desest.v0);

                    if (byteData[i - halfHeigth, j - halfWidth, 0] < threshold)
                        byteData[i - halfHeigth, j - halfWidth, 0] = 255;
                    else
                        byteData[i - halfHeigth, j - halfWidth, 0] = 0;
                }
            }

            handFrame.Data = byteData;
            return handFrame;
        }
Developer: americamm, Project: SystemVersions, Lines of code: 59, Source: HandSegmentation.cs

Example 2: setDstFaceParam

        private void setDstFaceParam()
        {
            dstSize = srcSize;
            dstFace = new Image<Bgr, byte>(dstSize);
            dstFace.SetZero();

            dstLandmark = new PointF[pointNum];
            for (int cnt = 0; cnt < pointNum; cnt++)
            {
                dstLandmark[cnt] = new PointF(
                    (float)((landmarkA[cnt].X + landmarkB[cnt].X) * srcSize.Width * integrationRatio),
                    (float)((landmarkA[cnt].Y + landmarkB[cnt].Y) * srcSize.Height * (1 - integrationRatio)));
            }
        }
Developer: TeamColdTea, Project: WCF, Lines of code: 14, Source: FaceIntegration.cs

Example 3: GenerateFilterMask

 /// <summary>
 /// Generate a circular high-pass or low-pass filter mask
 /// </summary>
 /// <param name="size">Size of the mask image</param>
 /// <param name="isHighPass">True for a high-pass mask, false for a low-pass mask</param>
 /// <param name="width">Radius of the circular cutoff region</param>
 /// <returns>The generated single-channel filter mask</returns>
 internal static Image<Gray, float> GenerateFilterMask(Size size, bool isHighPass, int width)
 {
     Image<Gray, float> mask = new Image<Gray, float>(size);
     if (isHighPass)
     {
         mask.SetValue(1);
         mask.Draw(new CircleF(new PointF(size.Width / 2, size.Height / 2), width), new Gray(0), 0);
     }
     else
     {
         mask.SetZero();
         mask.Draw(new CircleF(new PointF(size.Width / 2, size.Height / 2), width), new Gray(1), 0);
     }
     return mask;
 }
Developer: jmdbo, Project: SS, Lines of code: 22, Source: FFT.cs

Example 4: integrateFace

        public Image<Bgr, byte> integrateFace()
        {
            Mat srcRotMatA = new Mat();
            Mat srcRotMatB = new Mat();

            srcRotMatA = CvInvoke.GetAffineTransform(srcLandmarkA, dstLandmark);
            srcRotMatB = CvInvoke.GetAffineTransform(srcLandmarkB, dstLandmark);
            Image<Bgr, byte> srcWarpA = new Image<Bgr, byte>(dstSize);
            Image<Bgr, byte> srcWarpB = new Image<Bgr, byte>(dstSize);
            srcWarpA.SetZero();
            srcWarpB.SetZero();
            CvInvoke.WarpAffine(faceImgA, srcWarpA, srcRotMatA, dstSize);
            CvInvoke.WarpAffine(faceImgB, srcWarpB, srcRotMatB, dstSize);

            dstFace = integrationRatio * srcWarpA + (1 - integrationRatio) * srcWarpB; //blend the two aligned (warped) faces

            return dstFace;
        }
Developer: TeamColdTea, Project: WCF, Lines of code: 18, Source: FaceIntegration.cs

Example 5: fuseRegion

        public void fuseRegion()
        {
            PointF[] srcSet = new PointF[] {srcLandmark[0],
                srcLandmark[1],
                srcLandmark[2]
            };

            PointF[] dstSet = new PointF[] {dstLandmark[0],
                dstLandmark[1],
                dstLandmark[2]
            };

            PointF[] itermediateSet = new PointF[] {itermediateLandmark[0],
                itermediateLandmark[1],
                itermediateLandmark[2]
            };

            Mat srcRotMat = new Mat();
            Mat dstRotMat = new Mat();

            srcRotMat = CvInvoke.GetAffineTransform(srcSet, itermediateSet);
            dstRotMat = CvInvoke.GetAffineTransform(dstSet, itermediateSet);

            Image<Bgr, byte> srcWarp = new Image<Bgr, byte>(imgSize);
            srcWarp.SetZero();
            Image<Bgr, byte> dstWarp = new Image<Bgr, byte>(imgSize);
            dstWarp.SetZero();

            CvInvoke.WarpAffine(srcFaceImg, srcWarp, srcRotMat, imgSize);
            CvInvoke.WarpAffine(dstFaceImg, dstWarp, dstRotMat, imgSize);

            intermediateFaceImg = srcWarp / 2 + dstWarp / 2;

            srcWarp.Save("D:\\Codes\\datasets\\face_morph\\result_bbt_face_warp_1.jpg");
            dstWarp.Save("D:\\Codes\\datasets\\face_morph\\result_bbt_face_warp_2.jpg");
            intermediateFaceImg.Save("D:\\Codes\\datasets\\face_morph\\result_bbt_face_warp.jpg");
        }
Developer: TeamColdTea, Project: coldtea-algorithm, Lines of code: 37, Source: Program.cs

Example 6: setItermediateFaceImg

 public void setItermediateFaceImg()
 {
     Image<Bgr, byte> inputImg = new Image<Bgr, byte>(imgSize);
     inputImg.SetZero();
     intermediateFaceImg = inputImg.Clone();
 }
Developer: TeamColdTea, Project: coldtea-algorithm, Lines of code: 6, Source: Program.cs

Example 7: GetFFTReAndIm

        /// <summary>
        /// Calculate the image FFT and return the merged real and imaginary parts
        /// </summary>
        /// <param name="img">Input image</param>
        /// <param name="imageFFT">Output pointer to a two-channel 32-bit float image holding the DFT result (real and imaginary parts)</param>
        /// <returns>The optimal DFT size that was used</returns>
        private static Size GetFFTReAndIm(Image<Bgr, byte> img, out IntPtr imageFFT)
        {
            // First create image with optimal size and copy the content

            //get optimal size
            int wOptim = CvInvoke.cvGetOptimalDFTSize(img.Width);
            int hOptim = CvInvoke.cvGetOptimalDFTSize(img.Height);

            //create empty image
            Image<Bgr, byte> src1 = new Image<Bgr, byte>(wOptim, hOptim);
            src1.SetZero();

            // copy original to src
            src1.ROI = new Rectangle(0, 0, img.Width, img.Height);
            img.Copy(src1, null);
            src1.ROI = Rectangle.Empty;

            // prepare image with 2 channels for DFT
            Image<Gray, float> imgFFT_Re = src1.Convert<Gray, float>();
            Image<Gray, float> imgFFT_Im = src1.Convert<Gray, float>();
            imgFFT_Im.SetZero();

            //merge the 2 channels into one image
            imageFFT = CvInvoke.cvCreateImage(src1.Size, Emgu.CV.CvEnum.IPL_DEPTH.IPL_DEPTH_32F, 2);
            CvInvoke.cvMerge(imgFFT_Re, imgFFT_Im, System.IntPtr.Zero, System.IntPtr.Zero, imageFFT);

            // calculate DFT
            CvInvoke.cvDFT(imageFFT, imageFFT, Emgu.CV.CvEnum.CV_DXT.CV_DXT_FORWARD, 0);

            return new Size(wOptim, hOptim);
        }
Developer: jmdbo, Project: SS, Lines of code: 37, Source: FFT.cs

Example 8: TestKalman

      //[Test]
      public void TestKalman()
      {
         Image<Bgr, Byte> img = new Image<Bgr, byte>(400, 400);

         SyntheticData syntheticData = new SyntheticData();

         //Matrix<float> state = new Matrix<float>(new float[] { 0.0f, 0.0f}); //initial guess

         #region initialize Kalman filter
         KalmanFilter tracker = new KalmanFilter(2, 1, 0);
         syntheticData.TransitionMatrix.Mat.CopyTo(tracker.TransitionMatrix);
         syntheticData.MeasurementMatrix.Mat.CopyTo(tracker.MeasurementMatrix);

         syntheticData.ProcessNoise.Mat.CopyTo(tracker.ProcessNoiseCov);
         syntheticData.MeasurementNoise.Mat.CopyTo(tracker.MeasurementNoiseCov);
         syntheticData.ErrorCovariancePost.Mat.CopyTo(tracker.ErrorCovPost);
         tracker.StatePost.SetTo(new float[] { 0.0f, 0.0f });
         #endregion 

         System.Converter<double, PointF> angleToPoint = 
            delegate(double radianAngle)
            {
               return new PointF(
                  (float)(img.Width / 2 + img.Width / 3 * Math.Cos(radianAngle)),
                  (float)(img.Height / 2 - img.Width / 3 * Math.Sin(radianAngle)));
            };

         Action<PointF, Bgr> drawCross =
           delegate(PointF point, Bgr color)
           {
              img.Draw(new Cross2DF(point, 15, 15), color, 1);
           };

         ImageViewer viewer = new ImageViewer();
         System.Windows.Forms.Timer timer = new System.Windows.Forms.Timer();
         timer.Interval = 200;
         timer.Tick += new EventHandler(delegate(object sender, EventArgs e)
         {
            Matrix<float> measurement = syntheticData.GetMeasurement();
            // adjust Kalman filter state 
            tracker.Correct(measurement.Mat);

            tracker.Predict();

            #region draw the state, prediction and the measurement

            float[] correctedState = new float[2];
            float[] predictedState = new float[2];
            tracker.StatePost.CopyTo(correctedState);
            tracker.StatePre.CopyTo(predictedState);
            PointF statePoint = angleToPoint(correctedState[0]);
            PointF predictPoint = angleToPoint(predictedState[0]);
            PointF measurementPoint = angleToPoint(measurement[0, 0]);

            img.SetZero(); //clear the image
            drawCross(statePoint, new Bgr(Color.White)); //draw current state in White
            drawCross(measurementPoint, new Bgr(Color.Red)); //draw the measurement in Red
            drawCross(predictPoint, new Bgr(Color.Green)); //draw the prediction (the next state) in green 
            img.Draw(new LineSegment2DF(statePoint, predictPoint), new Bgr(Color.Magenta), 1); //Draw a line between the current position and prediction of next position 

            //Trace.WriteLine(String.Format("Velocity: {0}", tracker.CorrectedState[1, 0]));
            #endregion

            syntheticData.GoToNextState();

            viewer.Image = img;
         });
         timer.Start();
         viewer.Disposed += delegate(Object sender, EventArgs e) { timer.Stop(); };
         viewer.Text = "Actual State: White; Measurement: Red; Prediction: Green";
         viewer.ShowDialog();
      }
Developer: neutmute, Project: emgucv, Lines of code: 73, Source: Class1.cs

Example 9: ExtractContourImage

        private static Image<Bgr, Byte> ExtractContourImage(Image<Bgr, Byte> image, Contour<Point> contour, out Image<Gray, Byte> mask)
        {
            mask = image.Convert<Gray, Byte>();
            mask.SetZero();
            mask.Draw(contour, new Gray(255), new Gray(0), 2, -1);

            return image.And(new Bgr(255, 255, 255), mask);
        }
Developer: LoyVanBeek, Project: SetVision, Lines of code: 8, Source: ContourAnalyzer.cs

Example 10: ExtractContourImage

        private Image<Bgr, Byte> ExtractContourImage(Image<Bgr, Byte> source, Contour<Point> contour, out Image<Gray, Byte> mask)
        {
            mask = source.Convert<Gray, Byte>();
            mask.SetZero();
            //Contour<Point> shifted = ShiftContour(contour, -3,-3);
            mask.Draw(contour, new Gray(255), new Gray(0), 2, -1);

            return source.And(new Bgr(255, 255, 255), mask);
        }
Developer: LoyVanBeek, Project: SetVision, Lines of code: 9, Source: ContourNode.cs

Example 11: PickupLargestArea

 //
 private Contour<Point> PickupLargestArea(ref Image<Gray, byte> src)
 {
     Contour<Point> contour =  src.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_EXTERNAL);
     if (contour == null)
     {
         return null;
     }
     double max = 0;
     Contour<Point> largest = contour;
      //Find the largest-area contour
     while (contour != null)
     {
         if (contour.Area > max)
         {
             max = contour.Area;
             largest = contour;
         }
         contour = contour.HNext;
     }
     
      //Keep only the largest region inside src
     src.SetZero();
     src.DrawPolyline(largest.ApproxPoly(13).ToArray(), true, new Gray(255), -1);
     return largest.ApproxPoly(13);
 }
Developer: jgmanz, Project: asial-arbeit, Lines of code: 26, Source: Form1.cs

Example 12: MainLoop

        private void MainLoop()
        {
            CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
            HandImage = new Image<Gray, byte>(CurrentFrame.Size);
            while (!IsDisposed)
            {
                CurrentFrame = Cam.QueryFrame().Convert<Hsv, byte>();
                HandImage.SetZero();

                //Extract the skin-colored region
                ExtractSkinColor(CurrentFrame, ref HandImage);

                //Noise removal (Erode/Dilate return new images, so reassign the results)
                HandImage = HandImage.Erode(20);
                HandImage = HandImage.Dilate(20);

                imageBox2.Image = HandImage;

                //Extract the hand contour and determine the rock-paper-scissors gesture
                Contour<Point> hand_contour = PickupLargestArea(ref HandImage);
                Hands hand = DecideHandFromDefact(hand_contour);
                string msg = "";
                switch (hand)
                {
                    case Hands.PAPER:
                        msg = "パー";
                        break;
                    case Hands.ROCK:
                        msg = "グー";
                        break;
                    case Hands.SCISSORS:
                        msg = "チョキ";
                        break;
                    case Hands.UNKNOWN:
                        msg = "不明。。。";
                        break;
                }

                this.Invoke(new MethodInvoker(delegate() {
                    if (!this.IsDisposed) {
                            textBox_Msg.Text = msg;
                            UpdateParams();
                    }
                }));

                if (hand_contour == null)
                {
                    continue;
                }
                CurrentFrame.DrawPolyline(hand_contour.ToArray(), true, new Hsv(255, 255, 255), 2);
                CurrentFrame.DrawPolyline(hand_contour.GetConvexHull(ORIENTATION.CV_CLOCKWISE).ToArray(), true, new Hsv(50, 100, 50), 1);
                imageBox1.Image = CurrentFrame;

            }
        }
Developer: jgmanz, Project: asial-arbeit, Lines of code: 55, Source: Form1.cs

Example 13: integrateRegion

        private void integrateRegion(PointF[] _pointA, PointF[] _pointB, int _polyCount)
        {
            PointF[] pointA = convertPointF(_pointA, _polyCount);
            Image<Gray, byte> maskA = new Image<Gray, byte>(srcSize);
            VectorOfVectorOfPoint pointSetA = new VectorOfVectorOfPoint(new VectorOfPoint(convertPointF2Point(pointA, _polyCount)));
            CvInvoke.FillPoly(maskA, pointSetA, new MCvScalar(255), LineType.EightConnected);
            Image<Bgr, byte> tempA = new Image<Bgr, byte>(srcSize);
            tempA = faceImgA.Copy(maskA);

            PointF[] pointB = convertPointF(_pointB, _polyCount);
            Image<Gray, byte> maskB = new Image<Gray, byte>(srcSize);
            VectorOfVectorOfPoint pointSetB = new VectorOfVectorOfPoint(new VectorOfPoint(convertPointF2Point(pointB, _polyCount)));
            CvInvoke.FillPoly(maskB, pointSetB, new MCvScalar(255), LineType.EightConnected);
            Image<Bgr, byte> tempB = new Image<Bgr, byte>(srcSize);
            tempB = faceImgB.Copy(maskB);

            PointF[] dstPoint = getDstPoint(pointA, pointB, _polyCount);

            Mat srcRotMatA = new Mat();
            Mat srcRotMatB = new Mat();

            if (_polyCount == 3)
            {
                srcRotMatA = CvInvoke.GetAffineTransform(pointA, dstPoint);
                srcRotMatB = CvInvoke.GetAffineTransform(pointB, dstPoint);
            }
            else if (_polyCount == 4)
            {
                srcRotMatA = CvInvoke.GetPerspectiveTransform(pointA, dstPoint);
                srcRotMatB = CvInvoke.GetPerspectiveTransform(pointB, dstPoint);
            }
            Image<Bgr, byte> srcWarpA = new Image<Bgr, byte>(dstSize);
            Image<Bgr, byte> srcWarpB = new Image<Bgr, byte>(dstSize);
            Image<Gray, byte> maskWarpA = new Image<Gray, byte>(dstSize);
            Image<Gray, byte> maskWarpB = new Image<Gray, byte>(dstSize);
            srcWarpA.SetZero();
            srcWarpB.SetZero();
            maskWarpA.SetZero();
            maskWarpB.SetZero();

            if (_polyCount == 3)
            {
                CvInvoke.WarpAffine(tempA, srcWarpA, srcRotMatA, dstSize);
                CvInvoke.WarpAffine(tempB, srcWarpB, srcRotMatB, dstSize);
                CvInvoke.WarpAffine(maskA, maskWarpA, srcRotMatA, dstSize);
                CvInvoke.WarpAffine(maskB, maskWarpB, srcRotMatB, dstSize);
            }
            else if (_polyCount == 4)
            {
                CvInvoke.WarpPerspective(tempA, srcWarpA, srcRotMatA, dstSize);
                CvInvoke.WarpPerspective(tempB, srcWarpB, srcRotMatB, dstSize);
                CvInvoke.WarpPerspective(maskA, maskWarpA, srcRotMatA, dstSize);
                CvInvoke.WarpPerspective(maskB, maskWarpB, srcRotMatB, dstSize);
            }

            maskWarpA = maskWarpA - (maskWarpA & dstMask) * 255;
            maskWarpB = maskWarpB - (maskWarpB & dstMask) * 255;
            dstMask = dstMask + (maskWarpA & maskWarpB) * 255;
            srcWarpA = srcWarpA.Copy(maskWarpA);
            srcWarpB = srcWarpB.Copy(maskWarpB);
            CvInvoke.AddWeighted(srcWarpA, integrationRatio, srcWarpB, 1 - integrationRatio, 0, srcWarpA);
            CvInvoke.Add(dstFace, srcWarpA, dstFace);
        }
Developer: TeamColdTea, Project: coldtea-algorithm, Lines of code: 63, Source: FaceIntegration.cs


Note: The Image.SetZero method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.