本文整理汇总了C#中AllFramesReadyEventArgs类的典型用法代码示例。如果您正苦于以下问题:C# AllFramesReadyEventArgs类的具体用法?C# AllFramesReadyEventArgs怎么用?C# AllFramesReadyEventArgs使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
AllFramesReadyEventArgs类属于Microsoft.Kinect命名空间,在下文中一共展示了AllFramesReadyEventArgs类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: KinectAllFramesReady
// Handles the sensor's AllFramesReady event: reads the first skeleton's head
// joint and repositions the head sprite relative to the viewport center.
public void KinectAllFramesReady(object sender,AllFramesReadyEventArgs e)
{
using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
{
// A null frame means the request did not arrive in time; skip this tick.
if (skeletonFrame == null || screenManager == null)
{
return;
}

// Copy skeleton data and read the head joint (floats between -1 and 1).
skeletonFrame.CopySkeletonDataTo(skeletonData);
float headPosX = skeletonData[0].Joints[JointType.Head].Position.X;
float headPosY = skeletonData[0].Joints[JointType.Head].Position.Y;

midViewPort.X = screenManager.GraphicsDevice.Viewport.Width / 2;
midViewPort.Y = screenManager.GraphicsDevice.Viewport.Height / 2;

// Center the head rectangle on screen, offset by the scaled joint position.
// TODO: figure out if the skeleton data stream has a lower-left origin,
// because XNA has an upper-left origin and we adjust for that.
head.SetRectPos((int)((headPosX * 100) + midViewPort.X), (int)((headPosY * 100) + midViewPort.Y));

Console.WriteLine( "head: " + head.Rectangle.X + ", " + head.Rectangle.Y );
Console.WriteLine("joint: " + headPosX + ", " + headPosY);
}
}
示例2: kinect_AllFramesReady
void kinect_AllFramesReady( object sender, AllFramesReadyEventArgs e )
{
// Show the infrared image (delivered through the color stream).
using ( ColorImageFrame colorFrame = e.OpenColorImageFrame() ) {
if ( colorFrame != null ) {
// Pull the raw frame bytes out of the SDK buffer.
byte[] pixels = new byte[colorFrame.PixelDataLength];
colorFrame.CopyPixelDataTo( pixels );

// Render as 16-bit grayscale.
imageInfrared.Source = BitmapSource.Create( colorFrame.Width, colorFrame.Height,
96, 96, PixelFormats.Gray16, null, pixels,
colorFrame.Width * colorFrame.BytesPerPixel );
}
}

// Show the depth data.
using ( DepthImageFrame depthFrame = e.OpenDepthImageFrame() ) {
if ( depthFrame != null ) {
// Bitwise-invert every sample so the raw depth becomes a visible
// grayscale image.
short[] samples = new short[depthFrame.PixelDataLength];
depthFrame.CopyPixelDataTo( samples );
for ( int i = 0; i < samples.Length; i++ ) {
samples[i] = (short)~samples[i];
}

imageDepth.Source = BitmapSource.Create( depthFrame.Width, depthFrame.Height,
96, 96, PixelFormats.Gray16, null, samples,
depthFrame.Width * depthFrame.BytesPerPixel );
}
}
}
示例3: kinect_AllFramesReady
void kinect_AllFramesReady( object sender, AllFramesReadyEventArgs e )
{
// Display the color image. Kinect frames are IDisposable and must be
// released; the original leaked both frames and would throw a
// NullReferenceException when the color frame was dropped.
using ( ColorImageFrame colorFrame = e.OpenColorImageFrame() ) {
if ( colorFrame != null ) {
image1.Source = colorFrame.ToBitmapSource();
}
}

// Get the skeleton frame (also disposable).
using ( SkeletonFrame skeletonFrame = e.OpenSkeletonFrame() ) {
if ( skeletonFrame == null ) {
return;
}

// Copy out the skeleton data.
Skeleton[] skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
skeletonFrame.CopySkeletonDataTo( skeletonData );

// For each player, overlay image2 on the tracked head position.
foreach ( var skeleton in skeletonData ) {
var head = skeleton.Joints[JointType.Head];
if ( head.TrackingState == JointTrackingState.Tracked ) {
// Map the 3D head position into color-image coordinates and
// center image2 on that point.
ColorImagePoint point = kinect.MapSkeletonPointToColor( head.Position, kinect.ColorStream.Format );
var x = image2.Width / 2;
var y = image2.Height / 2;
image2.Margin = new Thickness( point.X - x, point.Y - y, 0, 0 );
image2.Visibility = System.Windows.Visibility.Visible;
}
}
}
}
示例4: sensor_AllFramesReady
// Clears the canvas, draws the first tracked skeleton, and hands it to the
// gesture recognition engine.
void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
{
// Frame dropped: nothing to do this tick.
if (skeletonFrame == null)
{
return;
}

// Copy the frame data into the reusable collection.
skeletonFrame.CopySkeletonDataTo(totalSkeleton);

// Pick the first actively tracked skeleton, if any.
Skeleton firstSkeleton = totalSkeleton.FirstOrDefault(
trackskeleton => trackskeleton.TrackingState == SkeletonTrackingState.Tracked);
if (firstSkeleton == null)
{
return;
}

// Redraw the skeleton and feed it to the recognizer.
this.myCanvas.Children.Clear();
this.DrawSkeleton(firstSkeleton);
recognitionEngine.Skeleton = firstSkeleton;
recognitionEngine.StartRecognize();
}
}
示例5: GetCameraPoint
// Maps the head and both hands of the given skeleton from skeleton space
// through depth space into color space, then positions the three ellipses.
void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
using (DepthImageFrame depth = e.OpenDepthImageFrame())
{
if (depth == null || _sensor == null)
{
return;
}

var mapper = this._sensor.CoordinateMapper;
const DepthImageFormat depthFormat = DepthImageFormat.Resolution640x480Fps30;
const ColorImageFormat colorFormat = ColorImageFormat.RgbResolution640x480Fps30;

// Project each joint of interest into depth space first.
DepthImagePoint headDepthPoint = mapper.MapSkeletonPointToDepthPoint(
first.Joints[JointType.Head].Position, depthFormat);
DepthImagePoint leftDepthPoint = mapper.MapSkeletonPointToDepthPoint(
first.Joints[JointType.HandLeft].Position, depthFormat);
DepthImagePoint rightDepthPoint = mapper.MapSkeletonPointToDepthPoint(
first.Joints[JointType.HandRight].Position, depthFormat);

// Then translate depth-space points into color-space points so the
// ellipses line up with the RGB image.
CameraPosition(ellipseHead, mapper.MapDepthPointToColorPoint(depthFormat, headDepthPoint, colorFormat));
CameraPosition(ellipseLeft, mapper.MapDepthPointToColorPoint(depthFormat, leftDepthPoint, colorFormat));
CameraPosition(ellipseRight, mapper.MapDepthPointToColorPoint(depthFormat, rightDepthPoint, colorFormat));
}
}
示例6: KinectSensorOnAllFramesReady
// Copies each color frame into a reusable buffer and blits it to a
// WriteableBitmap shown by ColorImage.
private void KinectSensorOnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
{
using (var colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame())
{
if (colorImageFrame == null)
{
return;
}

// (Re)allocate the buffer and bitmap whenever the stream format changes.
if (this.currentColorImageFormat != colorImageFrame.Format)
{
this.currentColorImageFormat = colorImageFrame.Format;
this.colorImageData = new byte[colorImageFrame.PixelDataLength];
this.colorImageWritableBitmap = new WriteableBitmap(
colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null);
ColorImage.Source = this.colorImageWritableBitmap;
}

// Copy the frame into the buffer and push it to the bitmap.
colorImageFrame.CopyPixelDataTo(this.colorImageData);
this.colorImageWritableBitmap.WritePixels(
new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
this.colorImageData,
colorImageFrame.Width * Bgr32BytesPerPixel,
0);
}
}
示例7: sensor_AllFramesReady
// Displays the color frame, then finds the first skeleton and forwards it
// to GetCameraPoint for joint-to-color mapping.
void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
using (ColorImageFrame frame = e.OpenColorImageFrame())
{
if (frame == null)
{
return;
}

// Copy the Bgr32 pixels (4 bytes each) and show them.
byte[] pixels = new byte[frame.PixelDataLength];
int stride = frame.Width * 4;
frame.CopyPixelDataTo(pixels);
imagecolor.Source = BitmapSource.Create(frame.Width, frame.Height, 96, 96, PixelFormats.Bgr32, null, pixels, stride);

// Bail out when nobody is in view.
Skeleton first = GetFirstSkeleton(e);
if (first == null)
{
return;
}

// NOTE: per-joint ScalePosition(...) calls (shoulders, knees, right
// hand) were disabled here in the original.
GetCameraPoint(first, e);
}
}
示例8: GetTrackedSkeleton
// Returns the first tracked skeleton in the current frame (or null) and
// updates the SkeletonDetected flag accordingly.
private Skeleton GetTrackedSkeleton(AllFramesReadyEventArgs e)
{
using (SkeletonFrame skeletonFrameData = e.OpenSkeletonFrame())
{
SkeletonDetected = false;

// Frame was dropped.
if (skeletonFrameData == null)
{
return null;
}

// The Kinect SDK always reports 6 skeleton slots.
Skeleton[] allSkeletons = new Skeleton[6];
skeletonFrameData.CopySkeletonDataTo(allSkeletons);

// Return the first slot that is actively tracked, if any.
foreach (Skeleton skeleton in allSkeletons)
{
if (skeleton.TrackingState == SkeletonTrackingState.Tracked)
{
SkeletonDetected = true;
return skeleton;
}
}

// No skeleton is tracked.
return null;
}
}
示例9: sensor_AllFramesReady
// Renders the RGB stream and a distance-colorized depth stream side by side.
void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
// Color stream; the using block disposes the frame automatically.
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (colorFrame != null)
{
byte[] pixels = new byte[colorFrame.PixelDataLength];
colorFrame.CopyPixelDataTo(pixels);

// Bgr32 packs 4 bytes per pixel, so stride = width * 4.
int stride = colorFrame.Width * 4;
img_colorimage.Source = BitmapSource.Create(colorFrame.Width, colorFrame.Height, 96, 96, PixelFormats.Bgr32, null, pixels, stride);
}
}

// Depth stream, colorized by GenerateColoredBytes.
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (depthFrame != null)
{
byte[] pixels = GenerateColoredBytes(depthFrame);
int stride = depthFrame.Width * 4;
img_depthimage.Source = BitmapSource.Create(depthFrame.Width, depthFrame.Height, 96, 96, PixelFormats.Bgr32, null, pixels, stride);
}
}
}
示例10: AllFramesReady
// Draws the camera image, overlays green dots on every tracked joint of the
// first tracked skeleton, then draws the distance bar, the hand/volume
// lines, and the current angle readout.
private void AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
using (ColorImageFrame colorImage = e.OpenColorImageFrame())
using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
{
// Both frames from the same event are required to stay in sync.
if (colorImage == null || skeletonFrame == null)
{
return;
}

colorImage.CopyPixelDataTo(bitmappixels);
skeletonFrame.CopySkeletonDataTo(skeletons);
bitmap.WritePixels(updateRect, bitmappixels, bitmap.PixelWidth * sizeof(int), 0);

using (DrawingContext drawingContext = drawingGroup.Open())
{
// Camera image as the backdrop.
drawingContext.DrawImage(bitmap, drawingRect);
// NOTE: button geometry drawing was disabled here in the original.

foreach (Skeleton skel in skeletons)
{
if (skel.TrackingState != SkeletonTrackingState.Tracked)
{
continue;
}

// A green dot on every tracked joint, in depth coordinates.
foreach (Joint joint in skel.Joints)
{
if (joint.TrackingState == JointTrackingState.Tracked)
{
var depthPoint = sensor.MapSkeletonPointToDepth(joint.Position, DepthImageFormat.Resolution640x480Fps30);
drawingContext.DrawEllipse(Brushes.Green, null, new Point(depthPoint.X, depthPoint.Y), 15, 15);
}
}

// Distance bar, raw and rotated hand lines, angle text.
drawingContext.DrawRectangle(Brushes.Red, null, new Rect(0.0, 0.0, distance1.Distance, 50.0));
drawingContext.DrawLine(new Pen(Brushes.Blue, 10), volume1.MiddlePoint, volume1.RightHandLocation);
var mat = Matrix.Identity;
mat.RotateAt(volume1.Angle, volume1.MiddlePoint.X, volume1.MiddlePoint.Y);
drawingContext.DrawLine(new Pen(Brushes.Blue, 10), volume1.MiddlePoint, mat.Transform(volume1.RightHandLocation));
drawingContext.DrawText(new FormattedText(volume1.Angle.ToString(), CultureInfo.CurrentCulture, FlowDirection.LeftToRight, new Typeface("MS Gothic"), 150, Brushes.Blue), new Point());

// Only the first tracked skeleton is rendered.
break;
}
}
}
return;
}
示例11: _sensor_AllFramesReady
// Copies the latest color frame into a byte buffer and displays it.
void _sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (colorFrame == null)
{
return;
}

byte[] pixels = new byte[colorFrame.PixelDataLength];
colorFrame.CopyPixelDataTo(pixels);

// Bgr32: 4 bytes per pixel.
int stride = colorFrame.Width * 4;
imageRGB.Source = BitmapSource.Create(
colorFrame.Width, colorFrame.Height, 96, 96,
PixelFormats.Bgr32, null, pixels, stride);
}
}
示例12: kinect_AllFramesReady
// Shows the RGB stream and feeds depth + skeleton data into the
// interaction stream.
void kinect_AllFramesReady( object sender, AllFramesReadyEventArgs e )
{
// Display the RGB image.
using ( var colorFrame = e.OpenColorImageFrame() ) {
if ( colorFrame != null ) {
var pixel = new byte[colorFrame.PixelDataLength];
colorFrame.CopyPixelDataTo( pixel );
ImageRgb.Source = BitmapSource.Create( colorFrame.Width, colorFrame.Height, 96, 96,
PixelFormats.Bgr32, null, pixel, colorFrame.Width * 4 );
}
}

// Feed the depth data to the interaction stream.
// GetRawPixelData() is an extension method implemented inside the
// interaction library.
using ( var depthFrame = e.OpenDepthImageFrame() ) {
if ( depthFrame != null ) {
stream.ProcessDepth( depthFrame.GetRawPixelData(), depthFrame.Timestamp );
}
}

// Feed the skeleton data (with the current accelerometer reading) to
// the interaction stream.
using ( var skeletonFrame = e.OpenSkeletonFrame() ) {
if ( skeletonFrame != null ) {
var skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
skeletonFrame.CopySkeletonDataTo( skeletons );
stream.ProcessSkeleton( skeletons, kinect.AccelerometerGetCurrentReading(), skeletonFrame.Timestamp );
}
}
}
示例13: RuntimeDepthFrameReady
// Renders the depth frame in several forms: plain bitmap, center-pixel
// distance text, a min-distance-highlighted bitmap, and an optional
// one-shot JPEG snapshot.
void RuntimeDepthFrameReady(AllFramesReadyEventArgs e)
{
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (depthFrame == null)
{
return;
}

// Turn the raw data into an array of distances.
var depthArray = depthFrame.ToDepthArray();

// Plain depth bitmap.
DepthImage.Image = depthFrame.ToBitmap();

// Distance at the center pixel of the frame.
MidPointDistanceViaGetDistanceText.Text =
depthFrame.GetDistance(depthFrame.Width / 2, depthFrame.Height / 2).ToString();

// Bitmap with everything nearer than _minDistance tinted red.
DepthImageWithMinDistance.Image =
depthArray.ToBitmap(depthFrame.Width, depthFrame.Height, _minDistance, Color.FromArgb(255, 255, 0, 0));

// One-shot snapshot requested from the UI.
if (_saveDepthFrame)
{
_saveDepthFrame = false;
depthFrame.ToBitmap().Save(DateTime.Now.ToString("yyyyMMddHHmmss") + "_depth.jpg", ImageFormat.Jpeg);
}
}
}
示例14: _sensor_AllFramesReady
// Shows the color stream, shrinks image3 when the player is far away, and
// opens (then disposes) the depth frame.
void _sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (colorFrame == null)
{
return;
}

// Copy the frame bytes out and display them.
byte[] pixels = new byte[colorFrame.PixelDataLength];
colorFrame.CopyPixelDataTo(pixels);

// Bgr32: 4 bytes per pixel.
int stride = colorFrame.Width * 4;
image2.Source = BitmapSource.Create(colorFrame.Width, colorFrame.Height, 96, 96, PixelFormats.Bgr32, null, pixels, stride);

// Halve image3 once the player is beyond the depth threshold.
// (playerDepth is maintained elsewhere — TODO confirm its units.)
if (playerDepth > 500)
{
image3.RenderTransform = new ScaleTransform(.5, .5);
}
}

// The depth frame is opened only so it gets disposed; no depth
// processing happens here.
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (depthFrame == null)
{
return;
}
}
}
示例15: mySensor_AllFramesReady
// Blends depth information into the color image's green channel and writes
// the result into myBitmap.
void mySensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
// Dispose both frames on every path. The original leaked whichever frame
// was non-null when the other was dropped (early return skipped both
// Dispose() calls), which eventually starves the sensor of buffers.
using (ColorImageFrame c = e.OpenColorImageFrame())
using (DepthImageFrame d = e.OpenDepthImageFrame())
{
if (c == null || d == null) return;

bool myRenderFlag = (bool)ChkRender.IsChecked;
c.CopyPixelDataTo(myColorArray);
d.CopyPixelDataTo(myArray);

for (int x = 0; x < 640; x++)
{
for (int y = 0; y < 480; y++)
{
// Shift the player-index bits out and back, zeroing them; note the
// depth value itself is NOT shifted down before /255 — presumably
// intentional brightness scaling, TODO confirm.
short depthVal = myArray[y * 640 + x];
depthVal = (short)(depthVal >> DepthImageFrame.PlayerIndexBitmaskWidth);
depthVal = (short)(depthVal << DepthImageFrame.PlayerIndexBitmaskWidth);
depthVal /= 255;

if (myRenderFlag)
{
// Overwrite the green channel with the scaled depth value.
myColorArray[(y * 640 + x) * 4 + 1] = (byte)depthVal;
}
// Force the alpha channel fully opaque.
myColorArray[(y * 640 + x) * 4 + 3] = 255;
}
}

myBitmap.WritePixels(
new Int32Rect(0, 0, myBitmap.PixelWidth, myBitmap.PixelHeight),
myColorArray,
myBitmap.PixelWidth * 4,
0);
}
}