This article collects typical usage examples of the C# method CoordinateMapper.MapSkeletonPointToDepthPoint. If you have been wondering how CoordinateMapper.MapSkeletonPointToDepthPoint is used in C#, what a call looks like in practice, or where to find working examples, the curated snippets below may help. You can also explore further usage examples of the containing class, CoordinateMapper.
Eleven code examples of the CoordinateMapper.MapSkeletonPointToDepthPoint method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
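Before the collected examples, here is a minimal sketch of the basic call. It assumes a started KinectSensor (called sensor here) with its depth stream enabled and a tracked Skeleton (called skeleton); both names are illustrative and not part of any example below.
// Minimal sketch (Kinect for Windows SDK v1.x): map a skeleton-space point into depth-image coordinates.
// `sensor` and `skeleton` are illustrative assumptions.
CoordinateMapper mapper = new CoordinateMapper(sensor);
SkeletonPoint headPosition = skeleton.Joints[JointType.Head].Position;
DepthImagePoint headInDepth = mapper.MapSkeletonPointToDepthPoint(headPosition, sensor.DepthStream.Format);
// headInDepth.X / headInDepth.Y are pixel coordinates in the depth image; headInDepth.Depth is millimeters.
Console.WriteLine("Head at ({0}, {1}), depth {2} mm", headInDepth.X, headInDepth.Y, headInDepth.Depth);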
Example 1: Convert
/// <summary>
/// Maps a 3D skeleton point to a 2D vector.
/// </summary>
/// <param name="sensor">The Kinect sensor.</param>
/// <param name="position">The skeleton point to map.</param>
/// <param name="coordinateMapper">The coordinate mapper.</param>
/// <returns>The 2D mapped position.</returns>
public static Vector2 Convert(KinectSensor sensor, SkeletonPoint position, CoordinateMapper coordinateMapper)
{
float width = 0;
float height = 0;
float x = 0;
float y = 0;
if (sensor.ColorStream.IsEnabled)
{
var colorPoint = coordinateMapper.MapSkeletonPointToColorPoint(position, sensor.ColorStream.Format);
x = colorPoint.X;
y = colorPoint.Y;
switch (sensor.ColorStream.Format)
{
case ColorImageFormat.RawYuvResolution640x480Fps15:
case ColorImageFormat.RgbResolution640x480Fps30:
case ColorImageFormat.YuvResolution640x480Fps15:
width = 640;
height = 480;
break;
case ColorImageFormat.RgbResolution1280x960Fps12:
width = 1280;
height = 960;
break;
}
}
else if (sensor.DepthStream.IsEnabled)
{
var depthPoint = coordinateMapper.MapSkeletonPointToDepthPoint(position, sensor.DepthStream.Format);
x = depthPoint.X;
y = depthPoint.Y;
switch (sensor.DepthStream.Format)
{
case DepthImageFormat.Resolution80x60Fps30:
width = 80;
height = 60;
break;
case DepthImageFormat.Resolution320x240Fps30:
width = 320;
height = 240;
break;
case DepthImageFormat.Resolution640x480Fps30:
width = 640;
height = 480;
break;
}
}
else
{
width = 1;
height = 1;
}
return new Vector2(x / width, y / height);
}
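A possible way to call this helper, sketched under the assumption that Vector2 is the XNA/MonoGame type and that screenWidth and screenHeight describe the render surface (none of these appear in the original): since Convert returns coordinates normalized to the stream resolution, scaling by the surface size yields pixel coordinates.
// Hedged usage sketch: Convert(...) (the Example 1 helper) returns values normalized to [0, 1],
// so multiplying by the render size gives pixel coordinates.
// `sensor`, `skeleton`, `screenWidth` and `screenHeight` are illustrative assumptions.
CoordinateMapper mapper = new CoordinateMapper(sensor);
Vector2 normalized = Convert(sensor, skeleton.Joints[JointType.HandRight].Position, mapper);
float pixelX = normalized.X * screenWidth;
float pixelY = normalized.Y * screenHeight;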
Example 2: Convert
public static Vector2 Convert(CoordinateMapper mapper, SkeletonPoint position,
Object format = null)
{
float width = 0;
float height = 0;
float x = 0;
float y = 0;
if (format != null) {
if (format is ColorImageFormat) {
var colorFormat = (ColorImageFormat)format;
var colorPoint = mapper.MapSkeletonPointToColorPoint(position, colorFormat);
x = colorPoint.X;
y = colorPoint.Y;
switch (colorFormat) {
case ColorImageFormat.RawYuvResolution640x480Fps15:
case ColorImageFormat.RgbResolution640x480Fps30:
case ColorImageFormat.YuvResolution640x480Fps15:
width = 640;
height = 480;
break;
case ColorImageFormat.RgbResolution1280x960Fps12:
width = 1280;
height = 960;
break;
}
} else if (format is DepthImageFormat) {
var depthFormat = (DepthImageFormat)format;
var depthPoint = mapper.MapSkeletonPointToDepthPoint(position, depthFormat);
x = depthPoint.X;
y = depthPoint.Y;
switch (depthFormat) {
case DepthImageFormat.Resolution80x60Fps30:
width = 80;
height = 60;
break;
case DepthImageFormat.Resolution320x240Fps30:
width = 320;
height = 240;
break;
case DepthImageFormat.Resolution640x480Fps30:
width = 640;
height = 480;
break;
}
} else {
width = 1;
height = 1;
}
} else {
// If no format was supplied, fall back to 1x1 so the division below cannot produce infinity.
width = 1;
height = 1;
}
return new Vector2(x / width, y / height);
}
Example 3: determineLimb
public List<Tuple<double, double, List<List<Point3D>>>> determineLimb(PointCloud pcdexisting, double weight)
{
//pull in skeleton measures from a temporary file for corbett.parse for now.
kinectInterp = new KinectInterpreter(skeloutline);
Dictionary<String, double[]> jointDepthsStr = new Dictionary<String, double[]>();
//temporary tuple for results
Tuple<double, double, List<List<Point3D>>> T = new Tuple<double, double, List<List<Point3D>>>(0,0,null);
//permanent list of tuples for passing back to coreLoader
List<Tuple<double, double, List<List<Point3D>>>> limbMeasures = new List<Tuple<double,double,List<List<Point3D>>>>();
//Test if we have a kinect otherwise we cannot use coordinate mapper.
if (KinectSensor.KinectSensors.Count > 0)
{
//test if we have already enumerated joint depths, if so, this has followed a recent scan.
if (jointDepths.Count == 0)
{
StreamReader sr = new StreamReader("SKEL.ptemp");
String line;
while ((line = sr.ReadLine()) != null)
{
String[] joint = Regex.Split(line, ":");
String[] positions = Regex.Split(joint[1], ",");
double[] jointPos = { Convert.ToDouble(positions[0]), Convert.ToDouble(positions[1]), Convert.ToDouble(Regex.Split(positions[2], "\n")[0]) };
//convert to depth co-ordinate space
SkeletonPoint sp = new SkeletonPoint();
sp.X = (float)Convert.ToDouble(jointPos[1]);
sp.Y = (float)Convert.ToDouble(jointPos[2]);
sp.Z = (float)Convert.ToDouble(jointPos[0]);
CoordinateMapper cm = new CoordinateMapper(kinectInterp.kinectSensor);
DepthImagePoint dm = cm.MapSkeletonPointToDepthPoint(sp, DepthImageFormat.Resolution640x480Fps30);
//convert x and y co-ords to arbitrary point cloud space
Tuple<double, double, double> convertedPoints = LimbCalculator.convertToPCCoords(dm.X, dm.Y, sp.Z);
double[] jointPos2 = { convertedPoints.Item3, convertedPoints.Item1, convertedPoints.Item2 };
//place back into jointDepths array in terms of depth space.
jointDepthsStr.Add(joint[0], jointPos2);
}
}
else
{
//we have some live skeleton depths, enumerate into strings
foreach (JointType j in jointDepths.Keys) {
jointDepthsStr.Add(j.ToString(), jointDepths[j]);
}
}
for (int limbArea = 1; limbArea <= 8; limbArea++)
{
//pass point cloud and correct bounds to Limb Calculator
//shoulders is first option in list so pass first.
limbMeasures.Add(LimbCalculator.calculateLimbBounds(pcdexisting, jointDepthsStr, limbArea, weight));
}
}
else
{
MessageBoxResult result = System.Windows.MessageBox.Show(this, "You need a Kinect to perform this action.",
"Kinect Sensor Missing", MessageBoxButton.OK, MessageBoxImage.Stop);
}
//change colour of point cloud for limb selection mode
gv.setMaterial();
this.DataContext = gv;
return limbMeasures;
}
Example 4: sensor_SkeletonFrameReady
/// <summary>
/// Skeleton frame ready handler.
/// Kinect tracks skeletons; localize the hand joint positions of each tracked skeleton.
/// Map the skeleton points given by Kinect to depth points and add the results to the HandTracker's list buffer for per-frame computation.
/// </summary>
/// <param name="sender"></param>
/// <param name="e"></param>
void sensor_SkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
{
if (skeletonFrame != null)
{
if (skeletonData.Length != skeletonFrame.SkeletonArrayLength)
{
skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
}
skeletonFrame.CopySkeletonDataTo(skeletonData);
int personCount = 0;
foreach (Skeleton sk in skeletonData)
{
if (sk.TrackingState == SkeletonTrackingState.Tracked)
{
//Map the skeleton points given by Kinect to depth points
CoordinateMapper mapper = new CoordinateMapper(this.sensor);
DepthImagePoint left = mapper.MapSkeletonPointToDepthPoint(
sk.Joints[JointType.HandLeft].Position,
this.sensor.DepthStream.Format);
DepthImagePoint right = mapper.MapSkeletonPointToDepthPoint(
sk.Joints[JointType.HandRight].Position,
this.sensor.DepthStream.Format);
rightWrist = new PointSkeleton3D(sk.Joints[JointType.WristRight].Position);
rightHand = new PointSkeleton3D(sk.Joints[JointType.HandRight].Position);
rightElbow = new PointSkeleton3D(sk.Joints[JointType.ElbowRight].Position);
personCount++;
}
}
skeletonReadyFlag = personCount > 0;
}
}
}
Example 5: KinectSensorOnAllFramesReady
private void KinectSensorOnAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
if (frameProccessed[1] == false)
{
frameProccessed[1] = true;
}
else
{
frameProccessed[1] = false;
return;
}
ColorImageFrame colorImageFrame = null;
DepthImageFrame depthImageFrame = null;
SkeletonFrame skeletonFrame = null;
try
{
colorImageFrame = e.OpenColorImageFrame();
depthImageFrame = e.OpenDepthImageFrame();
skeletonFrame = e.OpenSkeletonFrame();
if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
{
return;
}
if (this.depthImageFormat != depthImageFrame.Format)
{
this.depthImage = null;
this.depthImageFormat = depthImageFrame.Format;
}
if (this.colorImageFormat != colorImageFrame.Format)
{
this.colorImage = null;
this.colorImageFormat = colorImageFrame.Format;
}
if (this.depthImage == null)
{
this.depthImage = new short[depthImageFrame.PixelDataLength];
}
if (this.colorImage == null)
{
this.colorImage = new byte[colorImageFrame.PixelDataLength];
}
if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
{
this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
}
colorImageFrame.CopyPixelDataTo(this.colorImage);
depthImageFrame.CopyPixelDataTo(this.depthImage);
skeletonFrame.CopySkeletonDataTo(this.skeletonData);
}
finally
{
if (colorImageFrame != null)
{
colorImageFrame.Dispose();
}
if (depthImageFrame != null)
{
depthImageFrame.Dispose();
}
if (skeletonFrame != null)
{
skeletonFrame.Dispose();
}
using (depthImageFrame)
{
if (depthImageFrame != null && skeletonData != null)
{
foreach (Skeleton sd in skeletonData)
{
if (sd.TrackingState == SkeletonTrackingState.Tracked || sd.TrackingState == SkeletonTrackingState.PositionOnly)
{
Joint joint = sd.Joints[JointType.Head];
DepthImagePoint depthPoint;
CoordinateMapper coordinateMapper = new CoordinateMapper(frontSensor);
depthPoint = coordinateMapper.MapSkeletonPointToDepthPoint(joint.Position, DepthImageFormat.Resolution320x240Fps30);
point = new System.Windows.Point((int)(frontSensor.ColorStream.FrameWidth * depthPoint.X
/ depthImageFrame.Width),
(int)(frontSensor.ColorStream.FrameHeight * depthPoint.Y
/ depthImageFrame.Height));
/* textBlock1.Text = string.Format("X:{0:0.00} Y:{1:0.00} Z:{2:0.00}",
point.X,
point.Y,
joint.Position.Z); */
Canvas.SetLeft(headEllipse, point.X - headEllipse.Width / 2);
//.........part of this code is omitted here.........
Example 6: mainKinectSensor_AllFramesReady
/// <summary>
/// Gets called when the RGB, depth, and skeleton frames are ready
/// </summary>
/// <param name="sender"></param>
/// <param name="e"></param>
private void mainKinectSensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
if (!tutorial) return;
Skeleton mainSkeleton = getFirstSkeleton(e); //get the first skeleton
if (mainSkeleton == null) return; //return if the Kinect does not recognize any skeletons
CoordinateMapper mapper = new CoordinateMapper(mainKinectSensor); //mapper between skeleton and depth image
if (!corrected)
{
DepthImagePoint pointHead = mapper.MapSkeletonPointToDepthPoint(mainSkeleton.Joints[JointType.Head].Position, DepthImageFormat.Resolution640x480Fps30);
if (pointHead.Y < 100)
{
mainKinectSensor.ElevationAngle += Convert.ToInt32((120 - pointHead.Y) / TILT_FACTOR);
corrected = true;
return;
}
if (pointHead.Y > 140)
{
mainKinectSensor.ElevationAngle -= Convert.ToInt32((pointHead.Y - 120) / TILT_FACTOR);
corrected = true;
return;
}
}
corrected = true;
DepthImagePoint pointRight = mapper.MapSkeletonPointToDepthPoint(mainSkeleton.Joints[JointType.HandRight].Position, DepthImageFormat.Resolution640x480Fps30); //get the right hand
DepthImagePoint pointLeft = mapper.MapSkeletonPointToDepthPoint(mainSkeleton.Joints[JointType.HandLeft].Position, DepthImageFormat.Resolution640x480Fps30); //get the left hand
if (rotation && Math.Abs(pointLeft.X - pointRight.X) < HANDS_DISTANCE)
{
mainWindow.setHandMarkers(pointRight.X, pointRight.Y, pointLeft.X, pointLeft.Y, true);
if (!playing) return;
if (oldAngle == -1)
{
oldAngle = pointLeft.Y - pointRight.Y;
}
room.rotateCurrentElement(((pointLeft.Y - pointRight.Y) - oldAngle) / 2);
oldAngle = pointLeft.Y - pointRight.Y;
}
else
{
oldAngle = -1;
mainWindow.setHandMarkers(pointRight.X, pointRight.Y, pointLeft.X, pointLeft.Y, false);
}
if (!playing) return;
if (absolute)
{
if (zMax == -1 && zMin == -1)
{
zMax = pointRight.Depth + 400;
zMin = pointRight.Depth - 400;
}
if (pointRight.Depth > zMax) //adapt the Z-Range if the player goes out of it
{
zMax = pointRight.Depth;
zMin = pointRight.Depth - 800;
}
if (pointRight.Depth < zMin)
{
zMax = pointRight.Depth + 800;
zMin = pointRight.Depth;
}
Console.WriteLine(pointRight.Depth - zMin);
room.translateCurrentElementAbsolute(pointRight.X, pointRight.Y, pointRight.Depth - zMin); //absolute movement
}
else
{
if (oldx == -1) //relative movement
{
oldx = pointRight.X;
}
if (oldy == -1)
{
oldy = pointRight.Y;
}
if (oldz == -1)
{
oldz = pointRight.Depth;
}
room.translateCurrentElementRelative((pointRight.X - oldx) * Room.FACTOR_X, 0, 0);
room.translateCurrentElementRelative(0, (oldy - pointRight.Y) * Room.FACTOR_Y, 0);
room.translateCurrentElementRelative(0, 0, (pointRight.Depth - oldz) * Room.FACTOR_Z);
oldx = pointRight.X;
oldy = pointRight.Y;
//.........part of this code is omitted here.........
Example 7: GetCameraPoint
void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
using (DepthImageFrame depth = e.OpenDepthImageFrame())
{
if (depth == null ||
kinectSensorChooser1.Kinect == null)
{
return;
}
//Map a joint location to a point on the depth map
//head
CoordinateMapper mapper = new CoordinateMapper(sensor);
DepthImagePoint headDepthPoint =
mapper.MapSkeletonPointToDepthPoint(first.Joints[JointType.Head].Position, DepthImageFormat.Resolution640x480Fps30);
//left hand
DepthImagePoint leftDepthPoint =
mapper.MapSkeletonPointToDepthPoint(first.Joints[JointType.HandLeft].Position, DepthImageFormat.Resolution640x480Fps30);
//right hand
DepthImagePoint rightDepthPoint =
mapper.MapSkeletonPointToDepthPoint(first.Joints[JointType.HandRight].Position, DepthImageFormat.Resolution640x480Fps30);
//Map a depth point to a point on the color image
//head
ColorImagePoint headColorPoint =
mapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, headDepthPoint, ColorImageFormat.RgbResolution640x480Fps30);
//left hand
ColorImagePoint leftColorPoint =
mapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, leftDepthPoint,
ColorImageFormat.RgbResolution640x480Fps30);
//right hand
ColorImagePoint rightColorPoint =
mapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, rightDepthPoint,
ColorImageFormat.RgbResolution640x480Fps30);
SkeletonPoint rightHand = first.Joints[JointType.HandRight].Position;
SkeletonPoint leftHand = first.Joints[JointType.HandLeft].Position;
if (isInBoundingBox(rightHand))
{
CameraPosition(rightEllipse, rightColorPoint);
RightBlobs.Points.Add(new System.Windows.Point(rightColorPoint.X, rightColorPoint.Y));
//drawBlob(rightColorPoint);
tuioManager.addPoint(0, new PointF(rightColorPoint.X, rightColorPoint.Y));
}
else
{
rightColorPoint.X = 0; rightColorPoint.Y = 0;
CameraPosition(rightEllipse, rightColorPoint);
}
if (isInBoundingBox(leftHand))
{
CameraPosition(leftEllipse, leftColorPoint);
LeftBlobs.Points.Add(new System.Windows.Point(leftColorPoint.X, leftColorPoint.Y));
//drawBlob(leftColorPoint);
tuioManager.addPoint(1, new PointF(leftColorPoint.X, leftColorPoint.Y));
}
else
{
leftColorPoint.X = 0; leftColorPoint.Y = 0;
CameraPosition(leftEllipse, leftColorPoint);
}
//Console.WriteLine("X " + rightHand.X + "Y " + rightHand.Y + "Z " + rightHand.Z);
label1.Content = "X " + rightHand.X + "Y " + rightHand.Y + "Z " + rightHand.Z;
}
}
Example 8: nui_ColorFrameReady
/*
//output real image
void nui_ColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
{
ColorImageFrame ImageParam = e.OpenColorImageFrame();
if (ImageParam == null) return;
byte[] ImageBits = new byte[ImageParam.PixelDataLength];
ImageParam.CopyPixelDataTo(ImageBits);
BitmapSource src = null;
src = BitmapSource.Create(ImageParam.Width, //create bitmap source
ImageParam.Height,
96, 96,
PixelFormats.Bgr32, //Bgr32 pixel format: 8 bits each for blue, green and red, with the high byte unused
null,
ImageBits, // real image data
ImageParam.Width * ImageParam.BytesPerPixel);
image1.Source = src; //display the captured frame as a still image
}
*/
void nui_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
if (!((KinectSensor)sender).SkeletonStream.IsEnabled) { return; }
SkeletonFrame sf = e.OpenSkeletonFrame();
if (sf == null) return;
Skeleton[] skeletonData = new Skeleton[sf.SkeletonArrayLength];
//System.Diagnostics.Debug.WriteLine("SkeletonArrayLength: {0}", sf.SkeletonArrayLength);
sf.CopySkeletonDataTo(skeletonData);
using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
{
if (depthImageFrame != null)
{
foreach (Skeleton sd in skeletonData) //like Java's enhanced for statement
{
if (sd.TrackingState == SkeletonTrackingState.Tracked)
{
Joint joint = sd.Joints[JointType.HandRight];
/*Coordinate transform*/
CoordinateMapper coordMapper = new CoordinateMapper(nui);
DepthImagePoint depthPoint = coordMapper.MapSkeletonPointToDepthPoint(joint.Position, depthImageFrame.Format);
Point point = new Point((int)(image1.Width * depthPoint.X / depthImageFrame.Width),
(int)(image1.Height * depthPoint.Y / depthImageFrame.Height));
Canvas.SetLeft(Image6, point.X);
Canvas.SetTop(Image6, point.Y);
/**********************MENU 1********************/
if (menu_flag1)
{
System.Diagnostics.Debug.WriteLine("Inside Menu 1");
//////////////////Button1 : start the game after 3 seconds if the hand stays on the button
if ((point.X > 100 && point.X < 500) && (point.Y > 100 && point.Y < 200) && menu_flag1)
{
System.Diagnostics.Debug.WriteLine("Inside game start Button1");
//game_start(timer_flag);
if (timer_flag)
{
myTimer.Tick += new EventHandler(myTimer_Tick_Start);//call the myTimer_Tick_Start function
myTimer.Stop();
timer_flag = false;
myTimer.Interval = 3000;
myTimer.Start();
}
}
//////////////////Button2 : quit the game after 3 seconds if the hand stays on the button
else if ((point.X > 100 && point.X < 500) && (point.Y > 300 && point.Y < 400) && menu_flag1)
{
System.Diagnostics.Debug.WriteLine("Inside quit game Button2");
if (timer_flag)
{
myTimer.Tick += new EventHandler(myTimer_Tick_Finish);//call the myTimer_Tick_Finish Function
myTimer.Stop();
timer_flag = false;
myTimer.Interval = 3000;
myTimer.Start();
}
}
}
/****************************MENU 2*************************************/
if (menu_flag2)
{
System.Diagnostics.Debug.WriteLine("Inside Menu 2");
//////////////////Button1 : create a game after 3 seconds if the hand stays on the button
if ((point.X > 75 && point.X < 200) && (point.Y > 100 && point.Y < 250) && menu_flag2)
{
//.........part of this code is omitted here.........
Example 9: ProcessFrame
internal void ProcessFrame(CoordinateMapper mapper, Skeleton skeletonOfInterest, DepthImageFormat depthImageFormat)
{
_joints.Clear();
if (skeletonOfInterest != null)
{
var size = FormatHelper.GetDepthSize(depthImageFormat);
var depthWidth = (int)size.Width;
var headJoint = skeletonOfInterest.Joints[JointType.Head];
var neckJoint = skeletonOfInterest.Joints[JointType.ShoulderCenter];
var _headPoint = mapper.MapSkeletonPointToDepthPoint(headJoint.Position, depthImageFormat);
var _neckPoint = mapper.MapSkeletonPointToDepthPoint(neckJoint.Position, depthImageFormat);
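// Mirror the X coordinates across the depth image width (presumably so the points line up with a mirrored, selfie-style display).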
_headPoint.X = depthWidth - _headPoint.X;
_neckPoint.X = depthWidth - _neckPoint.X;
_joints.Add(_headPoint);
_joints.Add(_neckPoint);
}
RaiseFrameUpdated();
}
Example 10: GetDisplayPosition
/// <summary>
/// Returns the position of a 3D vector on a 2D surface in depth space
/// </summary>
/// <param name="j">current instance</param>
/// <param name="nui">NUI Runtime</param>
/// <param name="panel">The canvas which to project the vector on</param>
/// <returns>2D position</returns>
public static Point GetDisplayPosition(this NUIVector j, KinectSensor nui, Canvas panel)
{
float depthX, depthY;
//nui.SkeletonStream.SkeletonToDepthImage(j, out depthX, out depthY);
CoordinateMapper cm = new CoordinateMapper(nui);
DepthImagePoint dip = cm.MapSkeletonPointToDepthPoint(j, nui.DepthStream.Format);
depthX = dip.X;
depthY = dip.Y;
// crop to panel? - yields 320x240 all the time o.O
//int X = (int)Math.Max(0, Math.Min(depthX * panel.ActualWidth, panel.ActualWidth));
//int Y = (int)Math.Max(0, Math.Min(depthY * panel.ActualHeight, panel.ActualHeight));
return new Point(depthX, depthY);
}
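If the point must land on a Canvas whose size differs from the depth image, one possible continuation (an assumption, not part of the original snippet) is to scale the depth coordinates by the ratio of the panel size to the depth format's resolution:
// Hedged sketch: scale a depth-space point onto a WPF Canvas of arbitrary size.
// The 640x480 constants assume DepthImageFormat.Resolution640x480Fps30;
// substitute the dimensions that match nui.DepthStream.Format.
public static Point ScaleToCanvas(DepthImagePoint dip, Canvas panel)
{
    double x = dip.X * panel.ActualWidth / 640.0;
    double y = dip.Y * panel.ActualHeight / 480.0;
    return new Point(x, y);
}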
Example 11: GetCameraPoint
//DEPTH_END
void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
using (DepthImageFrame depth = e.OpenDepthImageFrame())
{
if (depth == null || kinectSensorChooser1.Kinect == null)
{
return;
}
CoordinateMapper cm = new CoordinateMapper(kinectSensorChooser1.Kinect);
DepthImagePoint headDepthPoint = cm.MapSkeletonPointToDepthPoint(first.Joints[JointType.Head].Position,DepthImageFormat.Resolution640x480Fps30);
DepthImagePoint leftDepthPoint = cm.MapSkeletonPointToDepthPoint(first.Joints[JointType.HandLeft].Position, DepthImageFormat.Resolution640x480Fps30);
DepthImagePoint rightDepthPoint = cm.MapSkeletonPointToDepthPoint(first.Joints[JointType.HandRight].Position, DepthImageFormat.Resolution640x480Fps30);
ColorImagePoint headColorPoint = cm.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, headDepthPoint, ColorImageFormat.RgbResolution640x480Fps30);
ColorImagePoint leftColorPoint = cm.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, leftDepthPoint, ColorImageFormat.RgbResolution640x480Fps30);
ColorImagePoint rightColorPoint = cm.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, rightDepthPoint, ColorImageFormat.RgbResolution640x480Fps30);
//=> CameraPosition(headImage, headColorPoint);
//=> CameraPosition(leftEllipse, leftColorPoint);
//=> CameraPosition(rightEllipse, rightColorPoint);
}
}
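Examples 7 and 11 go skeleton → depth → color in two steps; when only color-image coordinates are needed, the CoordinateMapper also offers a direct mapping (the same call used in Examples 1 and 2), for instance:
// Direct skeleton-to-color mapping — a sketch assuming `cm` and `first` as in the example above.
ColorImagePoint headColor = cm.MapSkeletonPointToColorPoint(
    first.Joints[JointType.Head].Position,
    ColorImageFormat.RgbResolution640x480Fps30);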