

C# DepthImageFrame.MapFromSkeletonPoint Method Code Examples

This article collects typical usage examples of the C# method Microsoft.Kinect.DepthImageFrame.MapFromSkeletonPoint. If you are wondering what DepthImageFrame.MapFromSkeletonPoint does or how to call it, the curated examples below should help. You can also explore further usage examples of the containing class, Microsoft.Kinect.DepthImageFrame.


Ten code examples of DepthImageFrame.MapFromSkeletonPoint are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better C# samples.
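Before the individual examples, here is a minimal sketch of the typical call pattern (the handler, the sensor wiring, and the choice of joint are hypothetical; it assumes the Kinect SDK 1.x API used throughout the examples below): MapFromSkeletonPoint converts a 3D SkeletonPoint, in meters, into a DepthImagePoint whose X/Y are pixel coordinates in the depth frame and whose Depth is in millimeters.

        // Minimal sketch: map the right-hand joint into depth-image space.
        // Assumes a started KinectSensor whose AllFramesReady event is wired to this handler.
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
            {
                if (depthFrame == null || skeletonFrame == null)
                {
                    return;
                }

                Skeleton[] skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
                skeletonFrame.CopySkeletonDataTo(skeletons);

                foreach (Skeleton skeleton in skeletons)
                {
                    if (skeleton.TrackingState != SkeletonTrackingState.Tracked)
                    {
                        continue;
                    }

                    // SkeletonPoint (meters) -> DepthImagePoint (pixel X/Y plus depth in millimeters)
                    SkeletonPoint hand = skeleton.Joints[JointType.HandRight].Position;
                    DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(hand);
                    Console.WriteLine("Right hand at ({0},{1}), {2} mm",
                        depthPoint.X, depthPoint.Y, depthPoint.Depth);
                }
            }
        }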

Example 1: HeightMeasure

        /// <summary>
        /// Measure the player's height from the depth and skeleton frames
        /// </summary>
        /// <param name="kinect"></param>
        /// <param name="depthFrame"></param>
        /// <param name="skeletonFrame"></param>
        private void HeightMeasure( KinectSensor kinect, DepthImageFrame depthFrame, SkeletonFrame skeletonFrame )
        {
            ColorImageStream colorStream = kinect.ColorStream;
            DepthImageStream depthStream = kinect.DepthStream;

            // Get the first tracked skeleton.
            // Keep its index, because it corresponds to the player ID.
            Skeleton[] skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
            skeletonFrame.CopySkeletonDataTo( skeletons );

            int playerIndex = 0;
            for ( playerIndex = 0; playerIndex < skeletons.Length; playerIndex++ ) {
                if ( skeletons[playerIndex].TrackingState == SkeletonTrackingState.Tracked ) {
                    break;
                }
            }
            if ( playerIndex == skeletons.Length ) {
                return;
            }

            // The first tracked skeleton
            Skeleton skeleton = skeletons[playerIndex];

            // The actual player ID is the skeleton index + 1
            playerIndex++;

            // If the head or the feet are not tracked, return and keep showing the plain RGB image
            Joint head = skeleton.Joints[JointType.Head];
            Joint leftFoot = skeleton.Joints[JointType.FootLeft];
            Joint rightFoot = skeleton.Joints[JointType.FootRight];
            if ( (head.TrackingState != JointTrackingState.Tracked) ||
                 (leftFoot.TrackingState != JointTrackingState.Tracked) ||
                 (rightFoot.TrackingState != JointTrackingState.Tracked) ) {
                return;
            }

            // Get the per-pixel data of the depth camera
            short[] depthPixel = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo( depthPixel );

            // Get the RGB-camera coordinates corresponding to each depth-camera pixel (registration)
            ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
            kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
                colorStream.Format, colorPoint );

            // Search for the top of the head
            DepthImagePoint headDepth = depthFrame.MapFromSkeletonPoint( head.Position );
            int top = 0;
            for ( int i = 0; (headDepth.Y - i) > 0; i++ ) {
                // Look at the pixel one row above; the highest row that still belongs
                // to this player marks the top of the head
                int index = ((headDepth.Y - i) * depthFrame.Width) + headDepth.X;
                int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
                if ( player == playerIndex ) {
                    top = i;
                }
            }

            // Map the top of the head back to a 3D skeleton point and take the lower of the two feet;
            // the difference between them gives the height
            head.Position = depthFrame.MapToSkeletonPoint( headDepth.X, headDepth.Y - top );
            Joint foot = (leftFoot.Position.Y < rightFoot.Position.Y) ? leftFoot : rightFoot;

            // Display the height
            DrawMeasure( kinect, colorStream, head, foot );
        }
Developer: hatsunea, Project: KinectSDKBook4CS, Lines: 71, Source: MainWindow.xaml.cs

Example 2: GetPosition2DLocation

        private Point GetPosition2DLocation(DepthImageFrame depthFrame, SkeletonPoint skeletonPoint)
        {
            DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeletonPoint);

            ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(depthPoint.X, depthPoint.Y, this._sensor.ColorStream.Format);

            // map back to skeleton.Width & skeleton.Height
            //return new Point(
            //    (int)(this.RenderSize.Width * colorPoint.X / this._sensor.ColorStream.FrameWidth),
            //    (int)(this.RenderSize.Height * colorPoint.Y / this._sensor.ColorStream.FrameHeight));
            return new Point(
                (int)(canvas1.Width * colorPoint.X / this._sensor.ColorStream.FrameWidth),
                (int)(canvas1.Height * colorPoint.Y / this._sensor.ColorStream.FrameHeight));
        }
Developer: williamchai, Project: kinect-ipd, Lines: 14, Source: MainWindow.xaml.cs

Example 3: GetRealLength

        private double GetRealLength(DepthImageFrame depthFrame, SkeletonPoint skeletonPoint1, SkeletonPoint skeletonPoint2)
        {
            DepthImagePoint depthPoint1 = depthFrame.MapFromSkeletonPoint(skeletonPoint1);
            DepthImagePoint depthPoint2 = depthFrame.MapFromSkeletonPoint(skeletonPoint2);

            double depth = (depthPoint1.Depth + depthPoint2.Depth) / 2;
            //double lengthPixel = Math.Sqrt(Math.Pow(depthPoint1.X - depthPoint2.X, 2) + Math.Pow(depthPoint1.Y - depthPoint2.Y, 2));

            double lengthPixel = Math.Abs(depthPoint1.Y - depthPoint2.Y);
            // 57 degrees is the Kinect depth camera's field of view;
            // use 57.0 so the half-angle is not truncated by integer division
            double frameLengthReal = 2 * depth * Math.Tan(57.0 / 2 * Math.PI / 180);

            // lengthReal / frameLengthReal = lengthPixel / frameLengthPixel
            double lengthReal = lengthPixel * frameLengthReal / depthFrame.Height;
            return lengthReal;
        }
Developer: williamchai, Project: kinect-ipd, Lines: 15, Source: MainWindow.xaml.cs

Example 4: DoDepthDetecting

        /// <summary>
        /// When Status is DepthDetecting, check whether the queued right-hand and
        /// elbow positions are stable. If they are, calibrate the depth and advance
        /// Status to the next state; otherwise nothing is changed.
        /// </summary>
        public void DoDepthDetecting(KinectSensor sensor, DepthImageFrame depthFrame)
        {
            if (Status == MainStatus.DepthDetecting)
            {
                if (IsStable(rightHandQueue, DEPTH_HAND_TOLERANCE, DEPTH_HAND_COUNT))
                {
                    // If stable, calibrate depth from the depth values sampled around the hand
                    SkeletonPoint handPoint = rightHandQueue.Last().Position;
                    DepthImagePoint centerDepthPoint = sensor.MapSkeletonPointToDepth(
                        handPoint, sensor.DepthStream.Format);
                    int[] depthArr = new int[DEPTH_NEAR_COUNT * 4];
                    int index = 0;
                    for (int i = 0; i < DEPTH_NEAR_COUNT; ++i)
                    {
                        // top
                        SkeletonPoint topSke = depthFrame.MapToSkeletonPoint(
                            centerDepthPoint.X - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i),
                            centerDepthPoint.Y - DEPTH_NEAR_RADIUS);
                        depthArr[index++] = depthFrame.MapFromSkeletonPoint(topSke).Depth;
                        // bottom
                        SkeletonPoint bottomSke = depthFrame.MapToSkeletonPoint(
                            centerDepthPoint.X - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i),
                            centerDepthPoint.Y + DEPTH_NEAR_RADIUS);
                        depthArr[index++] = depthFrame.MapFromSkeletonPoint(bottomSke).Depth;
                        // left
                        SkeletonPoint leftSke = depthFrame.MapToSkeletonPoint(
                            centerDepthPoint.X - DEPTH_NEAR_RADIUS,
                            centerDepthPoint.Y - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i));
                        depthArr[index++] = depthFrame.MapFromSkeletonPoint(leftSke).Depth;
                        // right
                        SkeletonPoint rightSke = depthFrame.MapToSkeletonPoint(
                            centerDepthPoint.X + DEPTH_NEAR_RADIUS,
                            centerDepthPoint.Y - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i));
                        depthArr[index++] = depthFrame.MapFromSkeletonPoint(rightSke).Depth;
                    }
                    // Use the median (rather than the mean) of the sampled depths
                    Array.Sort(depthArr);
                    kCalibrator.SetDepth(depthArr[DEPTH_NEAR_COUNT / 2]);

                    ClearQueue();
                    Status = MainStatus.EdgeDetecting;
                    mainWindow.textBlock.Text = "Detecting edge now. Arm at four corners of the screen.";
                    // cooling time timer
                    edgeTimer.Start();
                    edgeCooling = true;
                }
            }
        }
Developer: hawkingrei, Project: PPKinecT, Lines: 54, Source: MainController.cs

Example 5: ProcessFrame

        public void ProcessFrame(KinectSensor sensor, byte[] colorImage, ColorImageFormat colorImageFormat, DepthImageFrame depthFrame, short[] depthImage, DepthImageFormat depthImageFormat, Skeleton[] skeletonData, SkeletonFrame skeletonFrame)
        {
            //Console.WriteLine("N: ---------");
            coordinates.Clear();
            int detectedFace = 0;
            int trackedSkeletonsCount = 0;

            int playerIndex = -1;
            for (int i = 0; i < skeletonData.Length; i++)
            //foreach (Skeleton skeleton in skeletonData)
            {
                Skeleton skeleton = skeletonData[i];
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked
                    || skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of every skeleton, tracked or untracked.
                    if (!trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeleton.Joints[JointType.Head].Position);
                    ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(depthPoint.X, depthPoint.Y, colorImageFormat);

                    Coordinates2D c = new Coordinates2D();

                    playerIndex = i + 1;

                    c.X = colorPoint.X;
                    c.Y = colorPoint.Y;
                    c.Width = 0;
                    c.Height = 0;
                    c.LeftEyeX = 0;
                    c.LeftEyeY = 0;
                    c.RightEyeX = 0;
                    c.RightEyeY = 0;
                    c.PlayerIndex = playerIndex;

                    trackedSkeletonsCount++;

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (!scannedIdentities.Contains(skeleton.TrackingId) &&
                        detectedFace < 1 &&
                        trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        detectedFace++;
                        scannedIdentities.Add(skeleton.TrackingId);

                        skeletonFaceTracker.OnFrameReady(sensor, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                        Coordinates2D? realCoords = skeletonFaceTracker.GetFaceCoordinates();
                        if (realCoords.HasValue)
                        {
                            c = realCoords.Value;
                            c.PlayerIndex = playerIndex;
                        }
                    }

                    c.TrackingId = skeleton.TrackingId;
                    coordinates.Add(c);
                }
            }

            if (scannedIdentities.Count > 0 && scannedIdentities.Count >= trackedSkeletonsCount)
            {
                scannedIdentities.Clear();
                //Console.WriteLine("Clearing");
            }

            RemoveOldTrackers(skeletonFrame.FrameNumber);

            //if (coordinates.Count > 0)
            {
                int[] identities = new int[coordinates.Count];

              //  stopwatch.Reset();
              //  stopwatch.Start();
                double[] distances = new double[coordinates.Count * 8];
                this.ProcessImage(colorImage, GetWidth(colorImageFormat), GetHeight(colorImageFormat), depthImage, 640, 480, coordinates.ToArray(), identities, distances);
              //  stopwatch.Stop();
             //       foreach (int i in identities)
             //       {
             //           Console.WriteLine("Recognized: {0} (in {1} millis - {2} ticks)", i, stopwatch.ElapsedMilliseconds, stopwatch.ElapsedTicks);
             //       }
            }
        }
Developer: lomelina, Project: Biosandbox, Lines: 88, Source: BiometricEngine.cs

Example 6: GetPosition2DLocation

        /// <summary>
        /// Convert a real-world skeleton position to 2D image coordinates.
        /// </summary>
        /// <param name="depthFrame">Depth frame used for the conversion.</param>
        /// <param name="skeletonPosition">Skeleton point to convert.</param>
        /// <returns>Point in the 2D image.</returns>
        private Point GetPosition2DLocation(DepthImageFrame depthFrame, SkeletonPoint skeletonPosition)
        {
            DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeletonPosition);
            ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(depthPoint.X, depthPoint.Y, kinect.ColorStream.Format);

            // map back to skeleton.Width & skeleton.Height
            return new Point(colorPoint.X, colorPoint.Y);
        }
Developer: PuniCZ, Project: KinectGestureRecognition, Lines: 14, Source: KinectGesturePlayer.cs

Example 7: getDisplayPosition

        /// <summary>
        /// Get the display position of a joint
        /// </summary>
        /// <param name="depthFrame"></param>
        /// <param name="joint"></param>
        /// <returns></returns>
        private Point getDisplayPosition(DepthImageFrame depthFrame, Joint joint)
        {
            DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(joint.Position);

            return new Point(
                        (int)(userCanvas.Width * depthPoint.X / depthFrame.Width),
                        (int)(userCanvas.Height * depthPoint.Y / depthFrame.Height));
        }
Developer: sidhub1, Project: kinesis, Lines: 13, Source: MainWindow.xaml.cs

Example 8: GetPosition2DLocation

        private Point GetPosition2DLocation(DepthImageFrame depthFrame, SkeletonPoint skeletonPoint)
        {
            DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeletonPoint);
            return new Point(
                        (int)(this.RenderSize.Width * depthPoint.X / depthFrame.Width),
                        (int)(this.RenderSize.Height * depthPoint.Y / depthFrame.Height));
            /*
            switch (ImageType)
            {
                case ImageType.Color:
                    ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(depthPoint.X, depthPoint.Y, kinectSensor.ColorStream.Format);

                    // map back to skeleton.Width & skeleton.Height
                    return new Point(
                        (int)(this.RenderSize.Width * colorPoint.X / kinectSensor.ColorStream.FrameWidth),
                        (int)(this.RenderSize.Height * colorPoint.Y / kinectSensor.ColorStream.FrameHeight));
                case ImageType.Depth:
                    return new Point(
                        (int)(this.RenderSize.Width * depthPoint.X / depthFrame.Width),
                        (int)(this.RenderSize.Height * depthPoint.Y / depthFrame.Height));
                default:
                    throw new ArgumentOutOfRangeException("ImageType was a not expected value: " + ImageType.ToString());
            }
            */
        }
Developer: SoarLin, Project: KinectProject, Lines: 25, Source: MainWindow.xaml.cs

Example 9: MapToPoint

 public Point MapToPoint(SkeletonPoint s, DepthImageFrame depth)
 {
     DepthImagePoint dp = depth.MapFromSkeletonPoint(s);
     ColorImagePoint cp = depth.MapToColorImagePoint(dp.X, dp.Y, ColorImageFormat.RgbResolution640x480Fps30);
     return new Point(cp.X, cp.Y);
 }
Developer: pedrotanaka, Project: CS160-Final-Project, Lines: 6, Source: KinectController.cs

Example 10: CalculatePlayerPositions

        private int[] CalculatePlayerPositions(SkeletonFrame skeletonFrame, DepthImageFrame depthFrame)
        {
            Skeleton[] allskeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
            int[] pointdata = new int[skeletonFrame.SkeletonArrayLength * 3];
            skeletonFrame.CopySkeletonDataTo(allskeletons);
            int k = 0;

            foreach (Skeleton skeleton in allskeletons)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked)
                {
                    Joint jointpoint = this.BestTrackedJoint(skeleton);
                    if (jointpoint.TrackingState == JointTrackingState.Tracked)
                    {
                        var depthImagePoint = depthFrame.MapFromSkeletonPoint(jointpoint.Position);
                        pointdata[k] = depthImagePoint.Depth;
                        pointdata[k + 1] = depthImagePoint.X;
                        pointdata[k + 2] = depthImagePoint.Y;
                    }
                    else
                    {
                        pointdata[k] = 0;
                        pointdata[k + 1] = 0;
                        pointdata[k + 2] = 0;
                    }
                }
                else
                {
                    pointdata[k] = 0;
                    pointdata[k + 1] = 0;
                    pointdata[k + 2] = 0;
                }

                k += 3;
            }

            return pointdata;
        }
Developer: OccupOS, Project: OccupOSNode, Lines: 38, Source: NodeKinectSensor.cs


Note: the Microsoft.Kinect.DepthImageFrame.MapFromSkeletonPoint examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use must follow the license of the corresponding project. Do not reproduce without permission.