

C# Image.Draw Method Code Examples

This article collects typical usage examples of the C# Image.Draw method. (Although the source index files them under System.Windows.Controls.Image, the Draw method shown below actually belongs to Emgu CV's Image&lt;TColor, TDepth&gt; class; the samples simply call it from WPF — System.Windows.Controls — applications.) If you are wondering what Image.Draw does, how to call it, or what real-world usage looks like, the curated examples here should help. You can also explore further usage examples of the classes these snippets come from.


Below are 15 code examples of Image.Draw, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# examples.
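
Before diving into the collected examples, here is a minimal, self-contained sketch of the call pattern they all share (a sketch assuming Emgu CV 2.x, which most of the examples below use; the names and values are illustrative, not taken from any project below):

        // A minimal sketch, assuming Emgu CV 2.x: Image<TColor, TDepth>.Draw
        // renders shapes and text directly into the image's pixel buffer.
        Image<Bgr, Byte> canvas = new Image<Bgr, byte>(320, 240, new Bgr(255, 255, 255));
        canvas.Draw(new System.Drawing.Rectangle(20, 20, 100, 60), new Bgr(0, 0, 255), 2); // red box (channels are B, G, R)
        canvas.Draw(new CircleF(new System.Drawing.PointF(200, 120), 40f), new Bgr(0, 255, 0), 2); // green circle
        MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 2.0, 2.0);
        canvas.Draw("demo", ref font, new System.Drawing.Point(20, 200), new Bgr(255, 0, 0)); // blue text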

Example 1: image1_Initialized

        private void image1_Initialized(object sender, EventArgs e)
        {
            Image<Bgr, Byte> image = new Image<Bgr, byte>(400, 100, new Bgr(255, 255, 255));
            MCvFont f = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 3.0, 3.0);
            image.Draw("Hello, world", ref f, new System.Drawing.Point(10, 50), new Bgr(255.0, 0.0, 0.0));

            image1.Source = ToBitmapSource(image);
        }
Developer: samuto, Project: UnityOpenCV, Lines: 8, Source: Window1.xaml.cs
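
Example 1 calls a ToBitmapSource helper that the snippet does not define. A common implementation in Emgu CV WPF samples (an assumption here, not code from this project) converts the underlying GDI bitmap into a WPF BitmapSource and releases the HBITMAP explicitly:

        [System.Runtime.InteropServices.DllImport("gdi32")]
        private static extern int DeleteObject(IntPtr o);

        // Convert an Emgu CV IImage into a WPF BitmapSource.
        public static System.Windows.Media.Imaging.BitmapSource ToBitmapSource(Emgu.CV.IImage image)
        {
            using (System.Drawing.Bitmap source = image.Bitmap)
            {
                IntPtr ptr = source.GetHbitmap(); // allocates an HBITMAP
                var bs = System.Windows.Interop.Imaging.CreateBitmapSourceFromHBitmap(
                    ptr, IntPtr.Zero, System.Windows.Int32Rect.Empty,
                    System.Windows.Media.Imaging.BitmapSizeOptions.FromEmptyOptions());
                DeleteObject(ptr); // release the HBITMAP, or GDI handles leak
                return bs;
            }
        }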

Example 2: DrawSet

        private static void DrawSet(Image<Bgr, Byte> table, Dictionary<Card, System.Drawing.Point> cards, Random rnd, List<Card> set)
        {
            Bgr setcolor = new Bgr(rnd.Next(255), rnd.Next(255), rnd.Next(255));
            List<System.Drawing.Point> centers = new List<System.Drawing.Point>();

            foreach (Card card in set)
            {
                System.Drawing.Point p = cards[card];
                PointF center = new PointF(p.X, p.Y);
                centers.Add(p);
                CircleF circle = new CircleF(center, 50);
                table.Draw(circle, setcolor, 2);
            }

            table.DrawPolyline(centers.ToArray(), true, setcolor, 5);
        }
Developer: LoyVanBeek, Project: SetVision, Lines: 16, Source: Window1.xaml.cs

Example 3: DoNormalDetection

        // Face detection done the normal way
        public override void DoNormalDetection(string imagePath)
        {
            _image = new Image<Bgr, byte>(imagePath); //Read the files as an 8-bit Bgr image  
            _egray = _image.Convert<Gray, Byte>(); //Convert it to Grayscale
            _gray = _egray.Copy();    // Copy image in Grayscale            
            _egray._EqualizeHist(); // Equalize
            Image<Gray, Byte> tempgray = _egray.Copy();

            MCvAvgComp[][] facesDetected = _egray.DetectHaarCascade(_faces, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new System.Drawing.Size(20, 20));


            foreach (MCvAvgComp f in facesDetected[0])
            {
                if (f.neighbors > 100)
                {
                    //_image.Draw(f.rect, new Bgr(System.Drawing.Color.Blue), 2); // face
                    tempgray.ROI = f.rect; //Set the region of interest on the faces
                    MCvAvgComp[][] eyesDetected = tempgray.DetectHaarCascade(_eyes, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new System.Drawing.Size(20, 20));
                    if (eyesDetected[0].Length != 0)
                    {
                        foreach (MCvAvgComp e in eyesDetected[0])
                        {
                            if (e.neighbors > 100)
                            {
                                System.Drawing.Rectangle eyeRect = e.rect;
                                eyeRect.Offset(f.rect.X, f.rect.Y);
                                _image.Draw(eyeRect, new Bgr(System.Drawing.Color.Red), 2);
                            }
                        }
                    }

                }
            }

            this._processedImages = new IImage[3];
            this._processedImages[0] = _gray;
            this._processedImages[1] = _egray;
            this._processedImages[2] = _image;

        }
Developer: ravidasghodse, Project: genericva, Lines: 41, Source: EyesDetection.cs
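
The _faces and _eyes classifiers used above are fields initialized elsewhere in the class. Plausible declarations (an assumption; the actual XML paths are not shown in this snippet) would load the stock OpenCV cascades once:

        // Assumed fields (not in the snippet): Haar classifiers loaded once.
        private HaarCascade _faces = new HaarCascade("haarcascade_frontalface_default.xml");
        private HaarCascade _eyes = new HaarCascade("haarcascade_eye.xml");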

Example 4: sensor_AllFramesReady

        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            //TODO Keep the previous frame's image as well, compare the two on a
            //background process, and save the result to the worksheet; convert the
            //x/y differences to millimeters using the depth data (distance) and
            //some trigonometry.
            BitmapSource depthBmp = null;
            blobCount = 0;

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame != null)
                    {

                        blobCount = 0;

                        depthBmp = depthFrame.SliceDepthImage((int)sliderMin.Value, (int)sliderMax.Value);

                        Image<Bgr, Byte> openCVImg = new Image<Bgr, byte>(depthBmp.ToBitmap());
                        Image<Gray, byte> gray_image = openCVImg.Convert<Gray, byte>();

                        if (running)
                        {
                            wsheet.Cells[1, frameCount + 1].Value = "Frame " + frameCount;
                            frameCount++;
                            using (MemStorage stor = new MemStorage())
                            {
                                //Find only the outer contours (CV_RETR_EXTERNAL); use another RETR_TYPE to also retrieve holes
                                Contour<System.Drawing.Point> contours = gray_image.FindContours(
                                 Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                                 Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                                 stor);

                                //Convert depthPixels to skeleton points, which carry all three
                                //dimensions in meters. The conversion and copy are assumed costly,
                                //but no single-pixel-to-single-point conversion was available.
                                depthFrame.CopyDepthImagePixelDataTo(depthPixels);
                                //mapper.MapDepthFrameToSkeletonFrame(depthFormat, depthPixels, skeletonPoints);

                                for (; contours != null; contours = contours.HNext)
                                {
                                    if ((contours.Area > Math.Pow(sliderMinSize.Value, 2)) && (contours.Area < Math.Pow(sliderMaxSize.Value, 2)))
                                    {
                                        MCvBox2D box = contours.GetMinAreaRect();
                                        //Draw a red box around the blob
                                        openCVImg.Draw(box, new Bgr(System.Drawing.Color.Red), 2);
                                        blobCount++;
                                        int x = (int) box.center.X;
                                        int y = (int) box.center.Y;
                                        DepthImagePoint p = new DepthImagePoint();
                                        p.X = x;
                                        p.Y = y;
                                        p.Depth = depthPixels[x + 640 * y].Depth; // assumes a 640x480 depth stream
                                        SkeletonPoint s = mapper.MapDepthPointToSkeletonPoint(depthFormat, p);

                                        //TODO Conversion from absolute coordinates to relative coordinates

                                        addCoordData(3 * blobCount - 1, frameCount, s.X, s.Y, s.Z);
                                        /*if (KinectSensor.IsKnownPoint(s))
                                        {
                                            addCoordData(3 * blobCount - 1, frameCount, s.X, s.Y, s.Z);
                                        }*/

                                    }
                                }

                            }
                        }

                        this.outImg.Source = ImageHelpers.ToBitmapSource(openCVImg);
                        txtBlobCount.Text = blobCount.ToString();

                        getNext().RunWorkerAsync(openCVImg);
                    }
                }

                if (colorFrame != null)
                {

                      colorFrame.CopyPixelDataTo(this.colorPixels);
                      this.colorBitmap.WritePixels(
                          new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                          this.colorPixels,
                          this.colorBitmap.PixelWidth * sizeof(int),
                          0);

                }
            }
        }
Developer: kadron, Project: rat-trek, Lines: 92, Source: MainWindow.xaml.cs

Example 5: myKinect_ColorFrameReady

        void myKinect_ColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
        {
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (colorFrame == null) return;

                bmap = OpenCV2WPFConverter.ColorImageFrameToBitmap(colorFrame);

                imgBgr = new Image<Bgr, Byte>(bmap);
                imgHsv = new Image<Hsv, Byte>(bmap);

                if (imgBgr == null || imgHsv == null) return;
                processedBgr = imgBgr.InRange(new Bgr(B_min, G_min, R_min), new Bgr(B_max, G_max, R_max));

                processedHsv = imgHsv.InRange(new Hsv(H_min, S_min, V_min), new Hsv(H_max, S_max, V_max));
                //0,130,0 ~ 120, 256, 120 for green color.
                processedBgr = processedBgr.SmoothGaussian(7);
                processedHsv = processedHsv.SmoothGaussian(7);

                CircleF[] circlesBgr = processedBgr.HoughCircles(cannyThreshold, circleAccumulatorThreshold
                    , 2, processedBgr.Height / 8 , 8, 40)[0];

                CircleF[] circlesHsv = processedHsv.HoughCircles(cannyThreshold, circleAccumulatorThreshold
                    , 2, processedHsv.Height / 8, 8, 40)[0];

                HsvCircleCount = 0;
                RgbCircleCount = 0;

                // Draw circles found in the Bgr (RGB) mask onto the colour image
                foreach (CircleF circle in circlesBgr)
                {

                    RgbCircleCount += 1;
                    imgBgr.Draw(circle, new Bgr(System.Drawing.Color.Bisque), 3);

                }

                // Draw circles found in the HSV mask, also onto the colour image
                foreach (CircleF circle in circlesHsv)
                {

                    HsvCircleCount += 1;
                    imgBgr.Draw(circle, new Bgr(System.Drawing.Color.Bisque), 3);

                }

                kinectVideo.Source = OpenCV2WPFConverter.ToBitmapSource(imgBgr);
                HsvVideo.Source = OpenCV2WPFConverter.ToBitmapSource(processedHsv);
                RgbVideo.Source = OpenCV2WPFConverter.ToBitmapSource(processedBgr);
                //TODO: tune the minimum distance between detected circles
                this.HsvCircleUI.Content = HsvCircleCount.ToString();
                this.RgbCircleUI.Content = RgbCircleCount.ToString();

            }
        }
Developer: handsomesun, Project: hsv-finder, Lines: 55, Source: MainWindow.xaml.cs
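
The snippet reads cannyThreshold and circleAccumulatorThreshold fields it never declares. In Emgu CV 2.x, HoughCircles on an Image&lt;Gray, Byte&gt; takes both thresholds as Gray values, so plausible declarations (an assumption; the values are illustrative) are:

        // Assumed fields (not in the snippet): thresholds for HoughCircles.
        private Gray cannyThreshold = new Gray(180);             // edge threshold for the internal Canny stage
        private Gray circleAccumulatorThreshold = new Gray(120); // accumulator votes needed to accept a circle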

Example 6: detectFaces

        private void detectFaces(Image<Bgr,Byte> image, int mode)
        {
            /* Check to see that there was a frame collected */
            if (image != null)
            {
                if (mode == CAMERA)
                    mode_name.Content = "Camera";
                else
                    mode_name.Content = "Image";

                /* convert the frame from the camera to a transformed Image that improves facial detection */
                Image<Gray, Byte> grayFrame = image.Convert<Gray, Byte>();
                /* Detect how many faces are there on the image */
                var detectedFaces = grayFrame.DetectHaarCascade(haarCascade)[0];

                /* update the faces count */
                faces_count.Content = detectedFaces.Length;
                /* loop through all faces that were detected and draw a rectangle */
                foreach (var face in detectedFaces)
                {
                    image.Draw(face.rect, new Bgr(0, double.MaxValue, 0), 3); // green; the oversized value saturates to 255
                }
                image_to_filter = image;
                filtered_image = image;
                modified_image = ToBitmapSource(image);
                /* set the transformed image to the image1 object */
                image1.Source = modified_image;
            }
        }
Developer: GeorgiAngelov, Project: Facial-Detection-WPF, Lines: 29, Source: CameraCapture.xaml.cs

Example 7: Canny

        private Image<Bgr, byte> Canny(Image<Bgr, byte> newImg)
        {
            // Despite its name, this method extracts skin-coloured regions in
            // YCrCb space and outlines the two largest contours.
            Image<Ycc, Byte> ycc = newImg.Convert<Ycc, byte>();

            var skin = ycc.InRange(new Ycc(0, 131, 80), new Ycc(255, 185, 135));
            skin = skin.Erode(2);  // Erode/Dilate return new images; reassign to keep the result
            skin = skin.Dilate(2);

            var contours = skin.FindContours();
            if (contours != null)
            {
                List<Seq<Point>> allContours = new List<Seq<Point>>();
                for (Seq<Point> c = contours; c != null; c = c.HNext)
                {
                    allContours.Add(c);
                }
                allContours.Sort((a, b) => b.Total - a.Total);

                var biggest = allContours.Take(2);

                foreach (Seq<Point> points in biggest)
                {
                    var hull = points.GetConvexHull(ORIENTATION.CV_CLOCKWISE);

                    newImg.Draw(hull, new Bgr(Color.Red), 2);
                }
            }

            return newImg;
        }
Developer: HumanRemote, Project: HumanRemote, Lines: 31, Source: BackgroundSubtractProcessor.cs

Example 8: drawOnscreen

        /// <summary>
        /// Draws the hot-corner boxes on screen.
        /// </summary>
        /// <param name="img">The frame to draw on (assumed to be 640x480).</param>
        /// <returns>The frame with the box drawn on it.</returns>
        public Image<Bgr, Byte> drawOnscreen(Image<Bgr, Byte> img)
        {
            //Assuming the image we're using is 640X480
            MCvBox2D boxUpR = new MCvBox2D(new PointF(200, 200), boxSize, 0);

            img.Draw(boxUpR, new Bgr(System.Drawing.Color.Green), 2);

            return img;
        }
Developer: jlark, Project: cvlock, Lines: 14, Source: MainWindow.xaml.cs
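
drawOnscreen also reads a boxSize field defined elsewhere in the class. A plausible declaration (an assumption; the real dimensions are not shown in this snippet) is:

        // Assumed field (not in the snippet): pixel size of each hot-corner box.
        private System.Drawing.SizeF boxSize = new System.Drawing.SizeF(100, 100);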

Example 9: FrameGrabber

        void FrameGrabber(object sender, EventArgs e)
        {
            NamePersons.Add("");

            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            //Convert it to Grayscale
            gray = currentFrame.Convert<Gray, Byte>();

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
              face,
              1.2,
              10,
              Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
              new System.Drawing.Size(20, 20));

            Console.WriteLine(facesDetected[0].Length);

            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                t = t + 1;
                result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                //draw a red rectangle around the detected face
                currentFrame.Draw(f.rect, new Bgr(System.Drawing.Color.Red), 2);

                if (trainingImages.ToArray().Length != 0)
                {
                    //Termination criteria for the recognizer; the trained-image count serves as maxIteration
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                       trainingImages.ToArray(),
                       labels.ToArray(),
                       3000,
                       ref termCrit);

                    name = recognizer.Recognize(result);

                    //Draw the label for each face detected and recognized
                    currentFrame.Draw(name, ref font, new System.Drawing.Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(System.Drawing.Color.LightGreen));

                }

                NamePersons[t - 1] = name;
                NamePersons.Add("");

                label3.Text = facesDetected[0].Length.ToString();

                if (result != null)
                {
                    dispatcherTimer.Stop();
                    break;
                }

            }
            t = 0;

            //Concatenate the names of the recognized persons
            for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            {
                names = names + NamePersons[nnn] + ", ";
            }

            imageBoxFrameGrabber.Source = ConvertImage(currentFrame.ToBitmap());
            label4.Text = names;
            names = "";
            //Clear the list(vector) of names
            NamePersons.Clear();
        }
Developer: nycode, Project: ATTIoT, Lines: 71, Source: StudentCreateView.xaml.cs
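
The font used to label recognized faces is likewise a field the snippet does not declare. A plausible declaration (an assumption, mirroring common Emgu CV face-recognition samples) is:

        // Assumed field (not in the snippet): font for the recognized-name labels.
        private MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5, 0.5);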

Example 10: helloWorldTestButton_Click

        private void helloWorldTestButton_Click(object sender, RoutedEventArgs e)
        {
            //Create a 400x200 blue image (Bgr channel order is blue, green, red)
            using (Image<Bgr, Byte> img = new Image<Bgr, byte>(400, 200, new Bgr(255, 0, 0)))
            {
                //Create the font
                MCvFont f = new MCvFont(FONT.CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0);

                //Draw "Hello, world." on the image using the specific font
                img.Draw("Hello, world", ref f, new Point(10, 80), new Bgr(0, 255, 0));

                //Show the image using ImageViewer from Emgu.CV.UI
                CvInvoke.cvShowImage("Hello World Test Window", img.Ptr);
            }

            // Test crop
            using (Image<Gray, Byte> img = new Image<Gray, byte>(400, 200, new Gray(0)))
            {
                MCvFont f = new MCvFont(FONT.CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0);
                img.Draw("Hello, world", ref f, new Point(10, 80), new Gray(255));
                using (Image<Gray, Byte> img2 = Utilities.stripBorder(img, new Gray(100)))
                    CvInvoke.cvShowImage("After crop", img2.Ptr);
            }
        }
Developer: swkrueger, Project: signrider, Lines: 24, Source: HomeMenuView.xaml.cs

Example 11: DrawLinesHoriontal

        private void DrawLinesHoriontal(List<System.Drawing.PointF> markersF, Image<Bgr, byte> outImg1, bool captureAngles)
        {
            // Draw a horizontal red line from the left edge to each marker.
            for (int i = 0; i < markersF.Count; i++)
            {
                LineSegment2D line1 = new LineSegment2D(new System.Drawing.Point(0, (int)markersF[i].Y),
                                                    new System.Drawing.Point((int)markersF[i].X, (int)markersF[i].Y));

                outImg1.Draw(line1, new Bgr(System.Drawing.Color.Red), 1);
            }

            countFrames++;
        }
Developer: kevinmartell91, Project: HRNPCI, Lines: 15, Source: FisioterapeutaTestAnalisisView.xaml.cs

Example 12: sensor_AllFramesReady

        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            BitmapSource depthBmp = null;
            blobCount = 0;

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame != null)
                    {

                        blobCount = 0;

                        depthBmp = depthFrame.SliceDepthImage((int)sliderMin.Value, (int)sliderMax.Value);

                        Image<Bgr, Byte> openCVImg = new Image<Bgr, byte>(depthBmp.ToBitmap());
                        Image<Gray, byte> gray_image = openCVImg.Convert<Gray, byte>();

                        //Find contours
                        using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                        {
                            CvInvoke.FindContours(gray_image, contours, new Mat(), Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

                            for (int i = 0; i < contours.Size; i++)
                            {
                                VectorOfPoint contour = contours[i];
                                double area = CvInvoke.ContourArea(contour, false);

                                if ((area > Math.Pow(sliderMinSize.Value, 2)) && (area < Math.Pow(sliderMaxSize.Value, 2)))
                                {
                                    System.Drawing.Rectangle box = CvInvoke.BoundingRectangle(contour);
                                    openCVImg.Draw(box, new Bgr(System.Drawing.Color.Red), 2);
                                    blobCount++;
                                }
                            }
                        }

                        this.outImg.Source = ImageHelpers.ToBitmapSource(openCVImg);
                        txtBlobCount.Text = blobCount.ToString();
                    }
                }

                if (colorFrame != null)
                {

                    colorFrame.CopyPixelDataTo(this.colorPixels);
                    this.colorBitmap.WritePixels(
                        new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                        this.colorPixels,
                        this.colorBitmap.PixelWidth * sizeof(int),
                        0);

                }
            }
        }
Developer: ericvruder, Project: SW9_Project, Lines: 56, Source: KinectWindow.xaml.cs

Example 13: SensorAllFramesReady


//......... part of the code is omitted here .........
                }
            }

            // DEMO 1: blur and boost
            else if (demo == 1 && skeleton != null && colourImage != null
                && depthImage != null)
            {
                SkeletonPoint sleft = skeleton.Joints[JointType.HandLeft].Position;
                SkeletonPoint sright = skeleton.Joints[JointType.HandRight].Position;
                double hand_x_dist = Math.Abs(sleft.X - sright.X);
                double hand_y_dist = Math.Abs(sleft.Y - sright.Y);

                // halve the resolution to speed things up
                displayImage = colourImage.Resize(0.5, INTER.CV_INTER_NN);
                // displayImage = colourImage.Copy(); // slower

                // boost the RGB values based on vertical hand distance
                float boost = 3 - (float)(hand_y_dist * 5);
                displayImage = colourImage.Convert(delegate(Byte b)
                    { return (byte)((b * boost < 255) ? (b * boost) : 255); });

                // blur based on horizontal hand distance
                int blur = (int)(hand_x_dist * 20);
                if (blur > 0)
                    displayImage = displayImage.SmoothBlur(blur, blur);

                // show debug
                if (ShowDebug)
                {
                    debugImg2 = depthImage.Convert<Bgr, Byte>();
                    DepthImagePoint dp;
                    dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(sleft,
                        sensor.DepthStream.Format);
                    debugImg2.Draw(new CircleF(dp.ToPointF(), 20), new Bgr(Color.Coral), 1);
                    dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(sright,
                        sensor.DepthStream.Format);
                    debugImg2.Draw(new CircleF(dp.ToPointF(), 20), new Bgr(Color.LightGreen), 1);
                    Utilities.WriteDebugText(debugImg2, 10, 40, "{0:0.00}m {1:0.00}m",
                        hand_x_dist, hand_y_dist);
                }

            }

            // DEMO 2: Painting
            else if (demo == 2 && skeleton != null &&
                colourImage != null && depthImage != null)
            {
                // create a player mask for player we want
                byte playerIndex = (byte)(Array.IndexOf(skeletons, skeleton) + 1);

                //double[] min, max;
                //Point[] pmin, pmax;
                //playerMasks.MinMax(out min, out max, out pmin, out pmax);

                // pick the player mask for the skeleton we're tracking
                Image<Gray, Byte> playerMask = playerMasks.Convert(delegate(Byte b)
                    { return (Byte)(b == playerIndex ? 255 : 0); });

                // register depth to Rgb using Emgu
                // compute homography if first frame
                if (depthToRGBHomography == null)
                    depthToRGBHomography = ComputeDepthToRGBHomography(
                        depthImage.Convert<Gray, byte>(), sensor);
                // do the registration warp
                Image<Gray, byte> registeredplayerMask = playerMask.WarpPerspective(
                    depthToRGBHomography, INTER.CV_INTER_CUBIC, WARP.CV_WARP_DEFAULT,
Developer: nonsequitoria, Project: kinect_cv, Lines: 67, Source: MainWindow.xaml.cs

Example 14: RepaintColorFrame

    public void RepaintColorFrame(Image<Bgra, Byte> buffer, NBody body) {

      // Face & Name
      var head = body.GetJoint(NJointType.Head).Area;
      if (head.Width > 0) {
        buffer.Draw(head, brdGreen, 4);
        if (body.Name != null) {
          var txt = new Rectangle(head.X, head.Y + head.Height + 20, head.Width, head.Height); 
          DrawText(buffer, txt, body.Name);
        }
      }

      // Hands
      var right = body.GetJoint(NJointType.HandRight).Area;
      if (right.Width > 0) {
        buffer.Draw(right, brdGreen, 4);
      }

      var left = body.GetJoint(NJointType.HandLeft).Area;
      if (left.Width > 0) {
        buffer.Draw(left, brdGreen, 4);
      }

      // Joints
      foreach (var joint in body.Joints.Values) {
        if (joint == null) { continue; }
        if (joint.Tracking == NTrackingState.NotTracked) { continue; }
        var color = joint.Tracking == NTrackingState.Inferred ? brdRed : brdGreen;
        buffer.Draw(new CircleF(joint.Position2D, 10.0f), color, 10);
      }

      // Torso
      DrawBone(body, NJointType.Head, NJointType.Neck);
      DrawBone(body, NJointType.Neck, NJointType.SpineShoulder);
      DrawBone(body, NJointType.SpineShoulder, NJointType.SpineMid);
      DrawBone(body, NJointType.SpineMid, NJointType.SpineBase);
      DrawBone(body, NJointType.SpineShoulder, NJointType.ShoulderRight);
      DrawBone(body, NJointType.SpineShoulder, NJointType.ShoulderLeft);
      DrawBone(body, NJointType.SpineBase, NJointType.HipRight);
      DrawBone(body, NJointType.SpineBase, NJointType.HipLeft);

      // Right Arm    
      DrawBone(body, NJointType.ShoulderRight, NJointType.ElbowRight);
      DrawBone(body, NJointType.ElbowRight, NJointType.WristRight);
      DrawBone(body, NJointType.WristRight, NJointType.HandRight);
      DrawBone(body, NJointType.HandRight, NJointType.HandTipRight);
      DrawBone(body, NJointType.WristRight, NJointType.ThumbRight);

      // Left Arm
      DrawBone(body, NJointType.ShoulderLeft, NJointType.ElbowLeft);
      DrawBone(body, NJointType.ElbowLeft, NJointType.WristLeft);
      DrawBone(body, NJointType.WristLeft, NJointType.HandLeft);
      DrawBone(body, NJointType.HandLeft, NJointType.HandTipLeft);
      DrawBone(body, NJointType.WristLeft, NJointType.ThumbLeft);

      // Right Leg
      DrawBone(body, NJointType.HipRight, NJointType.KneeRight);
      DrawBone(body, NJointType.KneeRight, NJointType.AnkleRight);
      DrawBone(body, NJointType.AnkleRight, NJointType.FootRight);

      // Left Leg
      DrawBone(body, NJointType.HipLeft, NJointType.KneeLeft);
      DrawBone(body, NJointType.KneeLeft, NJointType.AnkleLeft);
      DrawBone(body, NJointType.AnkleLeft, NJointType.FootLeft);
    }
Developer: JpEncausse, Project: SARAH-Client-Windows, Lines: 65, Source: AddOnTask.cs
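
RepaintColorFrame leans on a DrawBone helper plus brdGreen/brdRed colours defined elsewhere in the class. A minimal sketch of DrawBone (an assumption, not code from this project; it presumes the class keeps the current frame in a buffer field) is:

    // Sketch of the assumed DrawBone helper: draw a line between two tracked
    // joints, red if either joint's position is only inferred.
    private void DrawBone(NBody body, NJointType from, NJointType to) {
      var j1 = body.GetJoint(from);
      var j2 = body.GetJoint(to);
      if (j1 == null || j2 == null) { return; }
      if (j1.Tracking == NTrackingState.NotTracked ||
          j2.Tracking == NTrackingState.NotTracked) { return; }
      var color = (j1.Tracking == NTrackingState.Inferred ||
                   j2.Tracking == NTrackingState.Inferred) ? brdRed : brdGreen;
      buffer.Draw(new LineSegment2DF(j1.Position2D, j2.Position2D), color, 4); // assumes a buffer field
    }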

Example 15: faceDetect

        private Bitmap faceDetect(Bitmap bitmap)
        {
            HaarCascade haar = new HaarCascade("haarcascade_frontalface_alt.xml");
            Image<Bgr, byte> temp = new Image<Bgr, byte>(bitmap);
            Image<Gray, byte> grayframe = temp.Convert<Gray, byte>();
            var faces =
                    grayframe.DetectHaarCascade(
                            haar, 1.4, 4,
                            HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                            new System.Drawing.Size(temp.Width / 8, temp.Height / 8)
                            )[0];

            foreach (var face in faces)
            {
                temp.Draw(face.rect, new Bgr(0, double.MaxValue, 0), 3);
            }
            return temp.ToBitmap();
        }
Developer: bcoleman532, Project: Interactive-Prototype, Lines: 18, Source: MainWindow.xaml.cs
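
A practical note on this example: constructing a HaarCascade parses the XML file, so rebuilding it on every call is wasteful. A common refinement (an assumption, not part of the original) is to load the cascade once and drop the local haar inside faceDetect:

        // Assumed refinement: parse the cascade XML once, not per call.
        private static readonly HaarCascade haar =
            new HaarCascade("haarcascade_frontalface_alt.xml");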


Note: The Image.Draw method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Consult each project's license before redistributing or reusing the code; do not republish without permission.