This article collects typical usage examples of the C# method Microsoft.Kinect.DepthImageFrame.CopyDepthImagePixelDataTo. If you are wondering what DepthImageFrame.CopyDepthImagePixelDataTo does in C# and how to call it, the curated examples below should help. You can also explore further usage of the containing class, Microsoft.Kinect.DepthImageFrame.
Fifteen code examples of DepthImageFrame.CopyDepthImagePixelDataTo are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# examples.
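All of the examples assume the Kinect for Windows SDK 1.x. For context, here is a minimal sketch (not taken from any of the examples below) of the calling pattern they share: a DepthImageFrame is obtained inside a DepthFrameReady handler, and CopyDepthImagePixelDataTo fills a pre-allocated DepthImagePixel buffer. Names such as sensor and depthPixels are illustrative assumptions.

// Hedged sketch of the usual calling context; requires using Microsoft.Kinect; and using System.Linq;
KinectSensor sensor = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected);
sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
DepthImagePixel[] depthPixels = new DepthImagePixel[sensor.DepthStream.FramePixelDataLength];
sensor.DepthFrameReady += (s, e) =>
{
    using (DepthImageFrame frame = e.OpenDepthImageFrame())
    {
        if (frame == null) return;                      // the frame can be null if it was not retrieved in time
        frame.CopyDepthImagePixelDataTo(depthPixels);   // one DepthImagePixel (depth + player index) per pixel
    }
};
sensor.Start();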
Example 1: synchronize
public void synchronize(
DepthImageFrame depthFrame,
ColorImageFrame colorFrame,
SkeletonFrame skeletonFrame,
Boolean isPauseMode
)
{
IsPauseMode = isPauseMode;
colorFrame.CopyPixelDataTo(_colorByte);
//Console.WriteLine("max depth: "+depthFrame.MaxDepth);
depthFrame.CopyDepthImagePixelDataTo(_depthPixels);
_sensor.CoordinateMapper.MapColorFrameToDepthFrame(
ColorImageFormat.RgbResolution640x480Fps30,
DepthImageFormat.Resolution640x480Fps30,
_depthPixels,
_depthPoint
);
for (int i = 0; i < _pixelDepthDataLength; i++)
{
_depthShort[i] = (short)_depthPoint[i].Depth;
_depthByte[i] = (byte)(_depthPoint[i].Depth*0.064-1);
}
skeletonFrame.CopySkeletonDataTo(totalSkeleton);
Skeleton firstSkeleton = (from trackskeleton in totalSkeleton
                          where trackskeleton.TrackingState == SkeletonTrackingState.Tracked
                          select trackskeleton).FirstOrDefault();
_isCreation = true;
if (firstSkeleton != null)
{
if (firstSkeleton.Joints[JointType.Spine].TrackingState == JointTrackingState.Tracked)
{
IsSkeletonDetected = true;
UserSkeleton[SkeletonDataType.RIGHT_HAND] =
ScalePosition(firstSkeleton.Joints[JointType.HandRight].Position);
UserSkeleton[SkeletonDataType.LEFT_HAND] =
ScalePosition(firstSkeleton.Joints[JointType.HandLeft].Position);
UserSkeleton[SkeletonDataType.SPINE] =
ScalePosition(firstSkeleton.Joints[JointType.Spine].Position);
return;
}
}
IsSkeletonDetected = false;
_isCreation = false;
}
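The buffers used by synchronize (_colorByte, _depthPixels, _depthPoint, _depthShort, _depthByte, totalSkeleton) are fields that must be allocated once before the first frame arrives. A plausible one-time setup, assuming the 640x480 formats used above (sizes are assumptions, not taken from the original class):

// Assumed field initialization for synchronize(); sizes match the 640x480 streams.
_colorByte = new byte[_sensor.ColorStream.FramePixelDataLength];
_depthPixels = new DepthImagePixel[_sensor.DepthStream.FramePixelDataLength];
_depthPoint = new DepthImagePoint[640 * 480];        // MapColorFrameToDepthFrame writes one point per color pixel
_pixelDepthDataLength = 640 * 480;
_depthShort = new short[_pixelDepthDataLength];
_depthByte = new byte[_pixelDepthDataLength];
totalSkeleton = new Skeleton[6];                      // the skeleton stream always reports six slots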
Example 2: Record
public void Record(DepthImageFrame frame)
{
writer.Write((int) FrameType.Depth);
var timeSpan = DateTime.Now.Subtract(referenceTime);
referenceTime = DateTime.Now;
writer.Write((long) timeSpan.TotalMilliseconds);
writer.Write(frame.BytesPerPixel);
writer.Write((int) frame.Format);
writer.Write(frame.Width);
writer.Write(frame.Height);
writer.Write(frame.FrameNumber);
var shorts = new short[frame.PixelDataLength];
//frame.CopyPixelDataTo(shorts);
frame.CopyDepthImagePixelDataTo(this._tmpDepthPixels);
_sensor.CoordinateMapper.MapColorFrameToDepthFrame(
ColorImageFormat.RgbResolution640x480Fps30,
DepthImageFormat.Resolution640x480Fps30,
this._tmpDepthPixels,
this._tmpDepthPoints
);
for (int i = 0; i < shorts.Length; i++)
{
shorts[i] = (short)this._tmpDepthPoints[i].Depth;
}
try
{
writer.Write(shorts.Length);
foreach (var s in shorts)
writer.Write(s);
}
catch (Exception ex)
{
Console.WriteLine(ex.Message);
}
}
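writer, referenceTime and the temporary mapping buffers are assumed to be fields of the recorder class; a plausible setup sketch (the file path is an illustrative assumption):

// Assumed recorder setup; requires using System.IO;
writer = new BinaryWriter(File.Create("kinect_depth.rec"));
referenceTime = DateTime.Now;
_tmpDepthPixels = new DepthImagePixel[_sensor.DepthStream.FramePixelDataLength];
_tmpDepthPoints = new DepthImagePoint[640 * 480];    // MapColorFrameToDepthFrame needs one slot per color pixel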
Example 3: depthFrameSetUp
void depthFrameSetUp(DepthImageFrame frame, DepthImagePixel[] depthPixels, Color[] depthArray, RenderTarget2D depthTarget)
{
depthFramyBusy = true;
using (frame)
{
if (frame != null)
{
//Console.WriteLine("Has frame");
frame.CopyDepthImagePixelDataTo(depthPixels);
for (int i = 0; i < depthPixels.Length; i++)
{
int b = (depthPixels[i].Depth >= frame.MinDepth && depthPixels[i].Depth <= frame.MaxDepth && depthPixels[i].IsKnownDepth) ? depthPixels[i].Depth : 0;
if (depthPixels[i].Depth >= frame.MaxDepth)
{
b = frame.MaxDepth;
}
float f = (float)((float)b - frame.MinDepth) / (float)(frame.MaxDepth - frame.MinDepth);
depthArray[i] = new Color(f, f, f, 1);
}
depthTarget.SetData(depthArray);
DepthImagePoint[] dip = new DepthImagePoint[kinect.ColorStream.FrameWidth * kinect.ColorStream.FrameHeight];
kinect.CoordinateMapper.MapColorFrameToDepthFrame(kinect.ColorStream.Format,
kinect.DepthStream.Format, depthPixels, dip);
for (int i = 0; i < depthCoordArray.Length; i++)
{
depthCoordArray[i] = new Color((float)dip[i].X / (float)kinect.ColorStream.FrameWidth, (float)dip[i].Y / (float)kinect.ColorStream.FrameHeight, 0, 1);
}
depthCoordMap.SetData(depthCoordArray);
}
else
{
}
}
depthFramyBusy = false;
}
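depthArray, depthTarget, depthCoordArray and depthCoordMap are XNA resources the caller has to create once. A sketch of a plausible allocation, assuming the 640x480 color and depth streams (GraphicsDevice comes from the XNA game class):

// Assumed one-time allocation of the XNA buffers passed to depthFrameSetUp().
Color[] depthArray = new Color[kinect.DepthStream.FramePixelDataLength];
RenderTarget2D depthTarget = new RenderTarget2D(GraphicsDevice, kinect.DepthStream.FrameWidth, kinect.DepthStream.FrameHeight);
depthCoordArray = new Color[kinect.ColorStream.FrameWidth * kinect.ColorStream.FrameHeight];
depthCoordMap = new RenderTarget2D(GraphicsDevice, kinect.ColorStream.FrameWidth, kinect.ColorStream.FrameHeight);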
Example 4: ReconhecerDistancia
private void ReconhecerDistancia(DepthImageFrame quadro, byte[] bytesImagem, int distanciaMaxima)
{
if (quadro == null || bytesImagem == null) return;
using (quadro)
{
DepthImagePixel[] imagemProfundidade = new DepthImagePixel[quadro.PixelDataLength];
quadro.CopyDepthImagePixelDataTo(imagemProfundidade);
for (int indice = 0; indice < imagemProfundidade.Length; indice++)
{
if (imagemProfundidade[indice].Depth < distanciaMaxima)
{
int indiceImageCores = indice * 4;
byte maiorValorCor = Math.Max(bytesImagem[indiceImageCores], Math.Max(bytesImagem[indiceImageCores + 1], bytesImagem[indiceImageCores + 2]));
bytesImagem[indiceImageCores] = maiorValorCor;
bytesImagem[indiceImageCores + 1] = maiorValorCor;
bytesImagem[indiceImageCores + 2] = maiorValorCor;
}
}
}
}
Developer ID: gilgaljunior, Project: CrieAplicacoesInterativascomoMicrosoftKinect, Lines of code: 23, Source: MainWindow.xaml.cs
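Note that this example indexes the color byte array with the raw depth index, which only lines up approximately; example 15 below does the same job with an explicit MapColorFrameToDepthFrame call. A hedged sketch of how a caller might feed and display the result (quadroCor and imagemCamera are assumed names):

// Illustrative call site: gray out everything closer than 2 m and show the frame.
byte[] bytesImagem = new byte[quadroCor.PixelDataLength];
quadroCor.CopyPixelDataTo(bytesImagem);
ReconhecerDistancia(quadroProfundidade, bytesImagem, 2000);   // Kinect depth values are in millimetres
imagemCamera.Source = BitmapSource.Create(quadroCor.Width, quadroCor.Height,
    96, 96, PixelFormats.Bgr32, null, bytesImagem, quadroCor.Width * 4);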
Example 5: TDepthFrame
public TDepthFrame(DepthImageFrame sensorFrame)
{
//TODO This can be done better
var depthImagePixels = new DepthImagePixel[sensorFrame.PixelDataLength];
sensorFrame.CopyDepthImagePixelDataTo(depthImagePixels);
var depthData = new short[sensorFrame.PixelDataLength];
for (int i = 0; i < sensorFrame.PixelDataLength; i++)
depthData[i] = depthImagePixels[i].Depth;
DepthData = depthData;
PixelDataLength = sensorFrame.PixelDataLength;
BytesPerPixel = sensorFrame.BytesPerPixel;
FrameNumber = sensorFrame.FrameNumber;
Width = sensorFrame.Width;
Height = sensorFrame.Height;
Timestamp = sensorFrame.Timestamp;
MinDepth = sensorFrame.MinDepth;
MaxDepth = sensorFrame.MaxDepth;
}
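The TODO is fair: the intermediate DepthImagePixel array can be skipped when only the depth values are needed. A hedged alternative (using the same bit shift that example 9 applies) would be:

// Sketch of an alternative: copy the packed shorts and strip the player-index bits.
short[] depthData = new short[sensorFrame.PixelDataLength];
sensorFrame.CopyPixelDataTo(depthData);
for (int i = 0; i < depthData.Length; i++)
    depthData[i] = (short)(depthData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth);   // depth in millimetres
DepthData = depthData;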
Example 6: ReconhecerHumanos
private BitmapSource ReconhecerHumanos(DepthImageFrame quadro)
{
if (quadro == null) return null;
using (quadro)
{
DepthImagePixel[] imagemProfundidade =
new DepthImagePixel[quadro.PixelDataLength];
quadro.CopyDepthImagePixelDataTo(imagemProfundidade);
byte[] bytesImagem = new byte[imagemProfundidade.Length * 4];
for (int indice = 0; indice < bytesImagem.Length; indice += 4)
{
if (imagemProfundidade[indice / 4].PlayerIndex != 0)
{
bytesImagem[indice + 1] = 255;
}
}
return BitmapSource.Create(quadro.Width, quadro.Height,
96, 96, PixelFormats.Bgr32, null, bytesImagem,
quadro.Width * 4);
}
}
Example 7: CalculateBlockPercentage
private double CalculateBlockPercentage(DepthImageFrame depthFrame)
{
DepthImagePixel[] depthPixels;
depthPixels = new DepthImagePixel[KinectSensor.DepthStream.FramePixelDataLength];
depthFrame.CopyDepthImagePixelDataTo(depthPixels);
int closePixel = 0;
short constrain = (short)(STimSettings.CloseZoneConstrain);
for (int i = 0; i < depthPixels.Length; ++i)
{
closePixel += (depthPixels[i].Depth <= constrain ? 1 : 0);
}
double rawPercent = (double)(closePixel * 100) / (double)depthPixels.Length;
return DepthPercentF.ProcessNewPercentageData(rawPercent);
}
Example 8: getDepthAtPoint
// Returns depth at a pixel position from a depth frame
short getDepthAtPoint(int[] position, DepthImageFrame frame)
{
if (frame != null)
{
frame.CopyDepthImagePixelDataTo(this.depthPixels);
int x = depthPixels.Length;
int pix = 640 * (position[1]) + position[0];
short depth = depthPixels[pix].Depth;
return depth;
}
else
{
return 0;
}
}
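A minimal usage sketch, assuming this.depthPixels is sized for the 640x480 depth stream and the frame comes from a DepthFrameReady handler (the sensor field name and the coordinate are illustrative assumptions):

// Assumed setup and call site for getDepthAtPoint().
this.depthPixels = new DepthImagePixel[sensor.DepthStream.FramePixelDataLength];
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
    short centerDepth = getDepthAtPoint(new[] { 320, 240 }, depthFrame);   // depth in mm at the image centre
}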
Example 9: GetROI
void GetROI(Skeleton user, DepthImageFrame depthFrame , ColorImageFrame color_frame = null)
{
// Map skeleton to Depth
DepthImagePoint rightHandPoint =
_sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(user.Joints[JointType.HandRight].Position, DepthImageFormat.Resolution640x480Fps30);
DepthImagePoint rightWristPoint =
_sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(user.Joints[JointType.WristRight].Position, DepthImageFormat.Resolution640x480Fps30);
int hand_depth = (rightHandPoint.Depth>rightWristPoint.Depth)?rightHandPoint.Depth:rightWristPoint.Depth+10; // hand depth used for segmenting out the hand
//*********************************** Map The depth Image to color Image to align the color image************************************************************************
DepthImagePixel[] depthImagePixels = new DepthImagePixel[depthFrame.PixelDataLength];
depthFrame.CopyDepthImagePixelDataTo(depthImagePixels);
short[] rawDepthData = new short[depthFrame.PixelDataLength];
depthFrame.CopyPixelDataTo(rawDepthData);
ColorImagePoint[] mapped_depth_locations = new ColorImagePoint[depthFrame.PixelDataLength];
_sensor.CoordinateMapper.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, depthImagePixels, ColorImageFormat.RgbResolution640x480Fps30, mapped_depth_locations);
byte[] aligned_colorPixels = new byte[color_frame.PixelDataLength]; // creating a byte array for storing the aligned pixel values
byte[] original_colorPixels = new byte[color_frame.PixelDataLength];
color_frame.CopyPixelDataTo(original_colorPixels);
int aligned_image_index = 0;
//int hand_baseindex = rightHandPoint.Y*640 + rightHandPoint.X;
for (int i = 0; i < mapped_depth_locations.Length; i++)
{
int depth = rawDepthData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
//Console.WriteLine(depth);
ColorImagePoint point = mapped_depth_locations[i];
if ((point.X >= 0 && point.X < 640) && (point.Y >= 0 && point.Y < 480))
{
int baseIndex = (point.Y * 640 + point.X) * 4;
if (depth < hand_depth && depth != -1)
{
aligned_colorPixels[aligned_image_index] = original_colorPixels[baseIndex];
aligned_colorPixels[aligned_image_index + 1] = original_colorPixels[baseIndex + 1];
aligned_colorPixels[aligned_image_index + 2] = original_colorPixels[baseIndex + 2];
aligned_colorPixels[aligned_image_index + 3] = 0;
}
else
{
aligned_colorPixels[aligned_image_index] = 0;
aligned_colorPixels[aligned_image_index + 1] = 0;
aligned_colorPixels[aligned_image_index + 2] = 0;
aligned_colorPixels[aligned_image_index + 3] = 0;
}
}
aligned_image_index = aligned_image_index + 4;
// *************************** Now modify the contents of this aligned_colorBitmap using the depth information ***************************************************
}
//***********************************************************************************************************************************************************************
int threshold = 20;
int hand_length = 3 * Math.Max(Math.Abs(rightHandPoint.X - rightWristPoint.X), Math.Abs(rightHandPoint.Y - rightWristPoint.Y));
// int hand_length = (int)Math.Sqrt((rightHandPoint.X - rightWristPoint.X) ^ 2 + (rightHandPoint.Y - rightWristPoint.Y) ^ 2);
int hand_length_old = hand_length;
//****************************Low pass filter for hand_length*********************************
if (Math.Abs(hand_length - hand_length_old) > threshold)
hand_length = hand_length_old;
//************************************************************************************************
// Console.WriteLine(hand_length);
int top_left_X_depth = rightHandPoint.X - hand_length;
int top_left_Y_depth = rightHandPoint.Y - hand_length;
int top_left_Z_depth = rightHandPoint.Depth;
top_left_X_depth = (top_left_X_depth<0)? 0 : top_left_X_depth;
top_left_Y_depth = (top_left_Y_depth<0)? 0 : top_left_Y_depth;
DepthImagePoint top_left = new DepthImagePoint();
top_left.X = top_left_X_depth;
//......... part of the code omitted here .........
Example 10: ProcessDepthAndSkeletonFrames
private void ProcessDepthAndSkeletonFrames(DepthImageFrame depthFrame, SkeletonFrame skeletonFrame)
{
depthFrame.CopyDepthImagePixelDataTo(depthFrameData);
coordinateMapper.MapDepthFrameToSkeletonFrame(DepthFormat, depthFrameData, skeletonPointData);
skeletonFrame.CopySkeletonDataTo(skeletonFrameData);
Skeleton skeleton = skeletonFrameData.FirstOrDefault(s => s.TrackingState == SkeletonTrackingState.Tracked);
if (skeleton != null)
{
hipCenter = skeleton.Joints[JointType.HipCenter];
handLeft = skeleton.Joints[JointType.HandLeft];
handRight = skeleton.Joints[JointType.HandRight];
}
if (handLeft.TrackingState != JointTrackingState.NotTracked && handRight.TrackingState != JointTrackingState.NotTracked)
{
LocateHands();
IdentifyGestures(LeftHand);
}
}
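The arrays handed to the copy and mapping calls are assumed to be fields allocated once; a plausible sketch under that assumption (the sensor field name is also an assumption):

// Assumed field allocation for ProcessDepthAndSkeletonFrames().
depthFrameData = new DepthImagePixel[sensor.DepthStream.FramePixelDataLength];
skeletonPointData = new SkeletonPoint[sensor.DepthStream.FramePixelDataLength];   // one skeleton-space point per depth pixel
skeletonFrameData = new Skeleton[6];                                              // six skeleton slots per frame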
Example 11: find_code
int find_code(ColorImageFrame colorFrame, DepthImageFrame depthFrame)
{
ZXing.Kinect.BarcodeReader reader = new ZXing.Kinect.BarcodeReader();
if (colorFrame != null)
{
//Decode the colorFrame
var result = reader.Decode(colorFrame);
if (result != null)
{
string val = result.Text;
int code_num = Convert.ToInt32(val);
double center_x = result.ResultPoints[0].X + 0.5 * (result.ResultPoints[2].X - result.ResultPoints[0].X);
double center_y = result.ResultPoints[0].Y + 0.5 * (result.ResultPoints[2].Y - result.ResultPoints[0].Y);
code_size = new Point((result.ResultPoints[2].X - result.ResultPoints[0].X), (result.ResultPoints[2].Y - result.ResultPoints[0].Y));
// Must mirror the coordinate here -- the depth frame comes in mirrored.
center_x = 640 - center_x;
// Map the color frame onto the depth frame
DepthImagePixel[] depthPixel = new DepthImagePixel[depthFrame.PixelDataLength];
depthFrame.CopyDepthImagePixelDataTo(depthPixel);
DepthImagePoint[] depthImagePoints = new DepthImagePoint[sensor.DepthStream.FramePixelDataLength];
sensor.CoordinateMapper.MapColorFrameToDepthFrame(sensor.ColorStream.Format, sensor.DepthStream.Format, depthPixel, depthImagePoints);
// Get the point in the depth frame at the center of the barcode
int center_point_color_index = (int)center_y * 640 + (int)center_x;
DepthImagePoint converted_depth_point = depthImagePoints[center_point_color_index];
Point p = new Point(converted_depth_point.X, converted_depth_point.Y);
code_points[code_num] = p;
Console.WriteLine("Found code " + code_num + " at (" + center_x + ", " + center_y + ") in color coordinates.");
Console.WriteLine("Translated to (" + p.X + ", " + p.Y + ") in depth coordinates.");
return code_num;
}
}
return -1;
}
Example 12: incomingDepthFrame
public void incomingDepthFrame(DepthImageFrame _df)
{
_df.CopyDepthImagePixelDataTo(_depthPixels);
}
Developer ID: guozanhua, Project: MFDetroit2013_Kinect_GreenScreen_PhotoKiosk, Lines of code: 4, Source: GreenScreenImplementation.cs
Example 13: handleDepthImageFrame
private void handleDepthImageFrame(DepthImageFrame depthFrame)
{
using (depthFrame)
{
if (depthFrame != null)
{
DepthImagePixel[] depthPixels = new DepthImagePixel[depthFrame.PixelDataLength];
depthFrame.CopyDepthImagePixelDataTo(depthPixels);
ThreadPool.QueueUserWorkItem(new WaitCallback(o => DepthFrameCallback(depthFrame.Timestamp, depthFrame.FrameNumber, depthPixels)));
}
}
}
Example 14: drawWhiteOnBlack
private void drawWhiteOnBlack(DepthImageFrame depthFrame)
{
depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);
// Get the min and max reliable depth for the current players
//int minDepth = depthPixels[0].Depth;
//int maxDepth = depthPixels[0].Depth;
//double avgDepth = 0;
//int playerPixelCount = 0;
//for (int i = 0; i < this.depthPixels.Length; ++i)
//{
// if (depthPixels[i].PlayerIndex == 1)
// {
// short depth = this.depthPixels[i].Depth;
// minDepth = Math.Min(depth, minDepth);
// maxDepth = Math.Max(depth, maxDepth);
// avgDepth += depth;
// playerPixelCount++;
// }
//}
//avgDepth /= playerPixelCount;
// Convert the depth to RGB
int colorPixelIndex = 0;
for (int i = 0; i < this.depthPixels.Length; ++i)
{
if (depthPixels[i].PlayerIndex != 0)
{
this.depthColorPixels[colorPixelIndex++] = (byte)255;
this.depthColorPixels[colorPixelIndex++] = (byte)255;
this.depthColorPixels[colorPixelIndex++] = (byte)255;
this.depthColorPixels[colorPixelIndex++] = (byte) this.depthAlpha;
//short depth = this.depthPixels[i].Depth;
//// Write out blue byte
//this.colorPixels[colorPixelIndex++] = (byte)Math.Min(depth - avgDepth, 0);
//// Write out green byte
//this.colorPixels[colorPixelIndex++] = (byte)maxDepth;
//// Write out red byte
//this.colorPixels[colorPixelIndex++] = (byte)Math.Min(avgDepth - depth, 0);
//// Write alpha if Bgra, else unused...
//++colorPixelIndex;
}
else
{
byte intensity = (byte)0;
// Write out blue byte
this.depthColorPixels[colorPixelIndex++] = intensity;
// Write out green byte
this.depthColorPixels[colorPixelIndex++] = intensity;
// Write out red byte
this.depthColorPixels[colorPixelIndex++] = intensity;
// Write alpha if Bgra, else unused...
this.depthColorPixels[colorPixelIndex++] = (byte) this.depthAlpha;
}
}
this.depthColorBitmap.WritePixels(
new Int32Rect(0, 0, this.depthColorBitmap.PixelWidth, this.depthColorBitmap.PixelHeight),
this.depthColorPixels,
this.depthColorBitmap.PixelWidth * sizeof(int),
0);
}
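depthPixels, depthColorPixels, depthColorBitmap and depthAlpha are assumed fields; because the loop writes an alpha byte per pixel, the bitmap needs a Bgra32 format. A plausible setup sketch (the sensor field name is an assumption):

// Assumed field setup for drawWhiteOnBlack(); alpha is written, so use Bgra32.
depthPixels = new DepthImagePixel[sensor.DepthStream.FramePixelDataLength];
depthColorPixels = new byte[sensor.DepthStream.FramePixelDataLength * 4];
depthColorBitmap = new WriteableBitmap(sensor.DepthStream.FrameWidth, sensor.DepthStream.FrameHeight,
    96.0, 96.0, PixelFormats.Bgra32, null);
depthAlpha = 255;   // fully opaque player silhouette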
Example 15: ReconhecerDistancia
private void ReconhecerDistancia(DepthImageFrame quadro, byte[] bytesImagem, int maxDistancia)
{
if (quadro == null || bytesImagem == null)
return;
using (quadro)
{
DepthImagePixel[] imagemProfundidade = new DepthImagePixel[quadro.PixelDataLength];
quadro.CopyDepthImagePixelDataTo(imagemProfundidade);
DepthImagePoint[] pontosImagemProfundidade = new DepthImagePoint[640 * 480];
Kinect.CoordinateMapper
.MapColorFrameToDepthFrame(Kinect.ColorStream.Format,
Kinect.DepthStream.Format, imagemProfundidade,
pontosImagemProfundidade);
for (int i = 0; i < pontosImagemProfundidade.Length; i++)
{
var point = pontosImagemProfundidade[i];
if (point.Depth < maxDistancia && KinectSensor.IsKnownPoint(point))
{
var pixelDataIndex = i * 4;
byte maiorValorCor =
Math.Max(bytesImagem[pixelDataIndex],
Math.Max(bytesImagem[pixelDataIndex + 1],
bytesImagem[pixelDataIndex + 2]));
bytesImagem[pixelDataIndex] = maiorValorCor;
bytesImagem[pixelDataIndex + 1] = maiorValorCor;
bytesImagem[pixelDataIndex + 2] = maiorValorCor;
}
}
}
}