本文整理汇总了C#中AllFramesReadyEventArgs.OpenDepthImageFrame方法的典型用法代码示例。如果您正苦于以下问题:C# AllFramesReadyEventArgs.OpenDepthImageFrame方法的具体用法?C# AllFramesReadyEventArgs.OpenDepthImageFrame怎么用?C# AllFramesReadyEventArgs.OpenDepthImageFrame使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类AllFramesReadyEventArgs
的用法示例。
在下文中一共展示了AllFramesReadyEventArgs.OpenDepthImageFrame方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: RuntimeDepthFrameReady
/// <summary>
/// Handles a depth frame: shows the raw depth image, the distance at the image
/// centre, and a copy with pixels nearer than _minDistance highlighted in red.
/// Optionally saves a timestamped JPEG snapshot when _saveDepthFrame is set.
/// </summary>
/// <param name="e">Event args giving access to the frames of this update.</param>
void RuntimeDepthFrameReady(AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (depthFrame == null)
        {
            // The runtime releases frames quickly; a null frame means we missed it.
            return;
        }
        // Turn raw data into an array of distances.
        var depthArray = depthFrame.ToDepthArray();
        // Get image.
        DepthImage.Image = depthFrame.ToBitmap();
        // Distance at the midpoint of the frame.
        MidPointDistanceViaGetDistanceText.Text = depthFrame.GetDistance(depthFrame.Width / 2, depthFrame.Height / 2).ToString();
        // Image with the minimum-distance pixels tinted red.
        DepthImageWithMinDistance.Image = depthArray.ToBitmap(depthFrame.Width, depthFrame.Height, _minDistance, Color.FromArgb(255, 255, 0, 0));
        if (_saveDepthFrame)
        {
            _saveDepthFrame = false;
            // FIX: dispose the temporary bitmap created just for the snapshot;
            // the original leaked one GDI+ bitmap per saved frame.
            using (var snapshot = depthFrame.ToBitmap())
            {
                snapshot.Save(DateTime.Now.ToString("yyyyMMddHHmmss") + "_depth.jpg", ImageFormat.Jpeg);
            }
        }
    }
}
示例2: kinect_AllFramesReady
/// <summary>
/// Renders the infrared stream (delivered through the color stream as 16-bit
/// grayscale) and an inverted visualization of the depth stream.
/// </summary>
void kinect_AllFramesReady( object sender, AllFramesReadyEventArgs e )
{
    // Infrared image: 16-bit grayscale pixels arriving on the color stream.
    using ( ColorImageFrame colorFrame = e.OpenColorImageFrame() ) {
        if ( colorFrame != null ) {
            var infrared = new byte[colorFrame.PixelDataLength];
            colorFrame.CopyPixelDataTo( infrared );
            imageInfrared.Source = BitmapSource.Create(
                colorFrame.Width, colorFrame.Height, 96, 96,
                PixelFormats.Gray16, null, infrared,
                colorFrame.Width * colorFrame.BytesPerPixel );
        }
    }
    // Depth image: bitwise-invert each sample to make it viewable.
    using ( DepthImageFrame depthFrame = e.OpenDepthImageFrame() ) {
        if ( depthFrame != null ) {
            var samples = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo( samples );
            for ( var i = 0; i < samples.Length; i++ ) {
                samples[i] = (short)~samples[i];
            }
            imageDepth.Source = BitmapSource.Create(
                depthFrame.Width, depthFrame.Height, 96, 96,
                PixelFormats.Gray16, null, samples,
                depthFrame.Width * depthFrame.BytesPerPixel );
        }
    }
}
示例3: GetCameraPoint
/// <summary>
/// Projects the head and both hands of the tracked skeleton into color-image
/// coordinates and positions the corresponding overlay ellipses there.
/// </summary>
void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depth = e.OpenDepthImageFrame())
    {
        // Nothing to map without both a depth frame and a sensor.
        if (depth == null || _sensor == null)
        {
            return;
        }
        var mapper = this._sensor.CoordinateMapper;
        const DepthImageFormat depthFormat = DepthImageFormat.Resolution640x480Fps30;
        const ColorImageFormat colorFormat = ColorImageFormat.RgbResolution640x480Fps30;
        // Skeleton space -> depth space for each joint of interest.
        DepthImagePoint headDepthPoint = mapper.MapSkeletonPointToDepthPoint(
            first.Joints[JointType.Head].Position, depthFormat);
        DepthImagePoint leftDepthPoint = mapper.MapSkeletonPointToDepthPoint(
            first.Joints[JointType.HandLeft].Position, depthFormat);
        DepthImagePoint rightDepthPoint = mapper.MapSkeletonPointToDepthPoint(
            first.Joints[JointType.HandRight].Position, depthFormat);
        // Depth space -> color space so the overlays line up with the RGB image.
        ColorImagePoint headColorPoint = mapper.MapDepthPointToColorPoint(
            depthFormat, headDepthPoint, colorFormat);
        ColorImagePoint leftColorPoint = mapper.MapDepthPointToColorPoint(
            depthFormat, leftDepthPoint, colorFormat);
        ColorImagePoint rightColorPoint = mapper.MapDepthPointToColorPoint(
            depthFormat, rightDepthPoint, colorFormat);
        CameraPosition(ellipseHead, headColorPoint);
        CameraPosition(ellipseLeft, leftColorPoint);
        CameraPosition(ellipseRight, rightColorPoint);
    }
}
示例4: kinect_AllFramesReady
/// <summary>
/// Shows the RGB stream and feeds the depth and skeleton data into the
/// interaction stream for gesture processing.
/// </summary>
void kinect_AllFramesReady( object sender, AllFramesReadyEventArgs e )
{
    using ( var colorFrame = e.OpenColorImageFrame() ) {
        if ( colorFrame != null ) {
            var rgbPixels = new byte[colorFrame.PixelDataLength];
            colorFrame.CopyPixelDataTo( rgbPixels );
            // Bgr32 is 4 bytes per pixel, hence the Width * 4 stride.
            ImageRgb.Source = BitmapSource.Create(
                colorFrame.Width, colorFrame.Height, 96, 96,
                PixelFormats.Bgr32, null, rgbPixels, colorFrame.Width * 4 );
        }
    }
    using ( var depthFrame = e.OpenDepthImageFrame() ) {
        if ( depthFrame != null ) {
            // GetRawPixelData() is an extension method implemented inside the
            // interaction library.
            stream.ProcessDepth( depthFrame.GetRawPixelData(), depthFrame.Timestamp );
        }
    }
    using ( var skeletonFrame = e.OpenSkeletonFrame() ) {
        if ( skeletonFrame != null ) {
            var trackedSkeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
            skeletonFrame.CopySkeletonDataTo( trackedSkeletons );
            // Skeletons plus the accelerometer reading go to the interaction stream.
            stream.ProcessSkeleton( trackedSkeletons, kinect.AccelerometerGetCurrentReading(), skeletonFrame.Timestamp );
        }
    }
}
示例5: GetCameraPoint
/// <summary>
/// Projects the left hand of the tracked skeleton onto the color image, moves
/// the arrow overlay there and feeds the X position to the swipe detector.
/// </summary>
void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depth = e.OpenDepthImageFrame())
    {
        // Bail out unless both a depth frame and a Kinect sensor are available.
        if (depth == null || kinectSensorChooser1.Kinect == null)
        {
            return;
        }
        // Skeleton -> depth -> color: two-step mapping of the left hand.
        var handPosition = first.Joints[JointType.HandLeft].Position;
        DepthImagePoint handDepthPoint = depth.MapFromSkeletonPoint(handPosition);
        ColorImagePoint handColorPoint = depth.MapToColorImagePoint(
            handDepthPoint.X, handDepthPoint.Y,
            ColorImageFormat.RgbResolution640x480Fps30);
        CameraPosition(arrow, handColorPoint);
        SwipeCheck(handColorPoint.X);
    }
}
示例6: CalibrationAllFramesReady
// Re-entrancy guard: AllFramesReady keeps firing while a previous invocation
// is still running (this handler sleeps for seconds below), so extra frames
// are skipped instead of queued.
bool working = false; // Skip frames if we're still processing stuff.
/// <summary>
/// Calibration-phase frame handler. On each color/depth frame pair it looks
/// for a touch on the currently displayed calibration code; each detected
/// touch reveals the next code. After the fifth code it computes the
/// calibration coefficients, calibrates the controller from the code corner
/// positions, swaps this handler for the normal one and signals completion.
/// </summary>
public void CalibrationAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
if (working) return;
working = true;
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (colorFrame == null)
{
// No frame available; release the guard before bailing out.
working = false;
return;
}
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (depthFrame == null)
{
// Same: never return while the guard is still set.
working = false;
return;
}
//byte[] pixels = new byte[colorFrame.PixelDataLength];
//colorFrame.CopyPixelDataTo(pixels);
//int stride = colorFrame.Width * 4;
//debugImage.Source = BitmapSource.Create(colorFrame.Width, colorFrame.Height, 96, 96, PixelFormats.Bgr32, null, pixels, stride);
//debugImage.Visibility = Visibility.Visible;
//int code_num = find_code(colorFrame, depthFrame);
// Index of the calibration code that was touched, or negative when none was.
int code_num = find_touch(colorFrame, depthFrame);
if (code_num >= 0)
{
// Make the next code visible.
if (code_num < 4)
{
codes[code_num].Visibility = Visibility.Hidden;
codes[code_num + 1].Visibility = Visibility.Visible;
next_code_num++;
// Pause so the same touch is not immediately re-detected on the next code.
Thread.Sleep(3000);
}
else
{
Thread.Sleep(3000);
// We are done. Calculate the coefficients.
sensor.AllFramesReady -= this.CalibrationAllFramesReady;
codes[4].Visibility = Visibility.Hidden;
kinectController.calibration_coefficients = get_calibration_coeffs();
// Active area is derived from the first and last code positions; the
// 1.25 / 0.7 / 0.8 factors inset it by fractions of the code size.
// NOTE(review): these look empirically tuned — confirm before changing.
Point center_top_left = code_points[0];
Point center_bot_right = code_points[4];
kinectController.Calibrate((int)(center_top_left.X + 1.25*code_size.X), (int)(center_top_left.Y + 0.7*code_size.Y), (int)(center_bot_right.X - 1.25*code_size.X), (int)(center_bot_right.Y - 0.8*code_size.Y));
// Resume normal frame handling with the now-calibrated controller.
sensor.AllFramesReady += kinectController.SensorAllFramesReady;
CalibrationDidComplete();
}
}
}
}
working = false;
}
示例7: kinect_AllFramesReady
/// <summary>
/// Shows the RGB camera image; when the grayscale checkbox is ticked, pixels
/// beyond 2000 mm are first converted via ReconhecerDistancia.
/// </summary>
private void kinect_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    // FIX: Kinect frames are IDisposable and must be released; the original
    // never disposed either frame, which starves the sensor's frame pool.
    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        byte[] imagem = ObterImagemSensorRGB(colorFrame);
        if (chkEscalaCinza.IsChecked.HasValue && chkEscalaCinza.IsChecked.Value)
        {
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                ReconhecerDistancia(depthFrame, imagem, 2000);
            }
        }
        if (imagem != null)
            imagemCamera.Source = BitmapSource.Create(kinect.ColorStream.FrameWidth, kinect.ColorStream.FrameHeight, 96, 96, PixelFormats.Bgr32, null, imagem, kinect.ColorStream.FrameBytesPerPixel * kinect.ColorStream.FrameWidth);
    }
}
示例8: ManageAllFrame
/// <summary>
/// Dispatches the sensor's frames to the skeleton, color and depth managers,
/// honoring the per-service enable flags. Frame-manager exceptions are logged
/// and never propagated, so one bad frame cannot kill the pipeline.
/// </summary>
/// <param name="e">Event args giving access to the frames of this update.</param>
private void ManageAllFrame(AllFramesReadyEventArgs e)
{
    if (!IsRunning)
    {
        return;
    }

    // Skeleton frames are always processed.
    using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
    {
        try
        {
            ManageSkeletonFrame(skeletonFrame);
        }
        catch (Exception ex)
        {
            Console.Error.WriteLine("Error with skeleton frame : " + ex.Message + " _ " + ex.StackTrace);
        }
    }

    // Color frames only when the color service is enabled.
    if (PropertiesPluginKinect.Instance.EnableColorFrameService)
    {
        using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
        {
            try
            {
                ManageColorFrame(colorFrame);
            }
            catch (Exception ex)
            {
                Console.Error.WriteLine("Error with color frame : " + ex.Message + " _ " + ex.StackTrace);
            }
        }
    }

    // Depth frames whenever any depth-consuming feature is active.
    bool depthNeeded = PropertiesPluginKinect.Instance.EnableDepthFrameService
        || PropertiesPluginKinect.Instance.KinectPointingModeEnabled
        || PropertiesPluginKinect.Instance.EnableGestureGrip;
    if (depthNeeded)
    {
        using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
        {
            try
            {
                ManageDepthFrame(depthFrame);
            }
            catch (Exception ex)
            {
                Console.Error.WriteLine("Error with depth frame : " + ex.Message + " _ " + ex.StackTrace);
            }
        }
    }
}
示例9: GetCameraPoint
/// <summary>
/// Maps the head, both hands, both feet and the hip of the tracked skeleton
/// first into depth-image coordinates and then into color-image coordinates.
/// </summary>
void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depth = e.OpenDepthImageFrame())
    {
        if (depth == null || _sensor == null)
        {
            return;
        }
        // Map each joint location to a point on the depth map.
        // head
        DepthImagePoint headDepthPoint =
            depth.MapFromSkeletonPoint(first.Joints[JointType.Head].Position);
        // left hand
        DepthImagePoint leftHandDepthPoint =
            depth.MapFromSkeletonPoint(first.Joints[JointType.HandLeft].Position);
        // right hand
        DepthImagePoint rightHandDepthPoint =
            depth.MapFromSkeletonPoint(first.Joints[JointType.HandRight].Position);
        // FIX: the original mapped JointType.HandRight for BOTH feet
        // (copy-paste error); use the actual foot joints.
        // left foot
        DepthImagePoint leftFootDepthPoint =
            depth.MapFromSkeletonPoint(first.Joints[JointType.FootLeft].Position);
        // right foot
        DepthImagePoint rightFootDepthPoint =
            depth.MapFromSkeletonPoint(first.Joints[JointType.FootRight].Position);
        // hip
        DepthImagePoint hipDepthPoint =
            depth.MapFromSkeletonPoint(first.Joints[JointType.HipCenter].Position);
        // Map each depth point to a point on the color image.
        ColorImagePoint headColorPoint =
            depth.MapToColorImagePoint(headDepthPoint.X, headDepthPoint.Y,
            ColorImageFormat.RgbResolution640x480Fps30);
        ColorImagePoint leftHandColorPoint =
            depth.MapToColorImagePoint(leftHandDepthPoint.X, leftHandDepthPoint.Y,
            ColorImageFormat.RgbResolution640x480Fps30);
        ColorImagePoint rightHandColorPoint =
            depth.MapToColorImagePoint(rightHandDepthPoint.X, rightHandDepthPoint.Y,
            ColorImageFormat.RgbResolution640x480Fps30);
        ColorImagePoint leftFootColorPoint =
            depth.MapToColorImagePoint(leftFootDepthPoint.X, leftFootDepthPoint.Y,
            ColorImageFormat.RgbResolution640x480Fps30);
        ColorImagePoint rightFootColorPoint =
            depth.MapToColorImagePoint(rightFootDepthPoint.X, rightFootDepthPoint.Y,
            ColorImageFormat.RgbResolution640x480Fps30);
        ColorImagePoint hipColorPoint =
            depth.MapToColorImagePoint(hipDepthPoint.X, hipDepthPoint.Y,
            ColorImageFormat.RgbResolution640x480Fps30);
        // NOTE(review): the mapped color points are not used within this
        // method as shown — presumably consumed by code elsewhere; verify.
    }
}
示例10: kinect_AllFramesReady
// Receives the update notification for all frame types and redraws the screen.
void kinect_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    // Both frames are IDisposable; stacked usings release them in reverse order.
    using (var color = e.OpenColorImageFrame())
    using (var depth = e.OpenDepthImageFrame())
    {
        RenderScreen(color, depth);
    }
}
示例11: Kinect_AllFramesReady
/// <summary>
/// Every 4th frame: finds the bounding box of the tracked player in the
/// 320x240 depth image, samples it on a 16x16 grid, and streams the resulting
/// bitmask over the serial port (two bytes per grid row, MSB first).
/// </summary>
private void Kinect_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    // Throttle: only process every 4th event.
    if (cnt++ < 3) return;
    cnt = 0;
    using (var f = e.OpenDepthImageFrame())
    {
        // FIX: OpenDepthImageFrame() returns null when the frame is no longer
        // available; the original dereferenced it unconditionally.
        if (f == null) return;
        var pd = f.GetRawPixelData();
        // Bounding box (mx..Mx, my..My) of all pixels that belong to a player.
        int mx = 1000, my = 1000, Mx = 0, My = 0;
        for (int i = 0; i < 320; i++)
        {
            for (int j = 0; j < 240; j++)
            {
                if (get(pd, i, j).PlayerIndex > 0)
                {
                    if (i < mx) mx = i;
                    if (i > Mx) Mx = i;
                    if (j < my) my = j;
                    if (j > My) My = j;
                }
            }
        }
        if (mx < 1000) // at least one player pixel was found
        {
            // Cell size of the 16x16 sampling grid.
            // NOTE(review): integer division truncates before the float
            // assignment; confirm whether (Mx - mx) / 16f was intended.
            float wc = (Mx - mx) / 16;
            float hc = (My - my) / 16;
            if (keep_proportions)
            {
                // Use the larger cell size on both axes to keep the aspect ratio.
                if (hc < wc) hc = wc;
                else wc = hc;
            }
            Console.WriteLine("mx={0},Mx={1},my={2},My={3}", mx, Mx, my, My);
            for (int j = 0; j < 16; j++)
            {
                // Pack the 16 samples of this row into two bytes.
                int b1 = 0;
                for (int i = 0; i < 8; i++)
                {
                    b1 = b1 * 2 + ((get(pd, mx + (int)(wc * i), my + (int)(hc * j)).PlayerIndex > 0) ? 1 : 0);
                }
                int b2 = 0;
                for (int i = 8; i < 16; i++)
                {
                    b2 = b2 * 2 + ((get(pd, mx + (int)(wc * i), my + (int)(hc * j)).PlayerIndex > 0) ? 1 : 0);
                }
                byte[] x = new byte[2];
                x[0] = (byte)b1;
                x[1] = (byte)b2;
                COM.Write(x, 0, 2);
                Console.WriteLine("Sending {0},{1}", b1, b2);
            }
        }
        // Console.Write("{0}\r", pd.Length);
    }
}
示例12: GetCameraPoint
/// <summary>
/// Tracks the left hand relative to the head in depth space. The user must
/// stand inside a 1.7m-2m box in front of the sensor; within that box, a left
/// hand raised above head height feeds the wave-to-quit gesture detector.
/// </summary>
private void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (depthFrame == null || _sensor == null)
            return;

        // Depth-space position of the left hand (X/Y in pixels, Z = depth in mm).
        DepthImagePoint handDepthPoint = depthFrame.MapFromSkeletonPoint(first.Joints[JointType.HandLeft].Position);
        var leftHand = new LeftHandPoint()
        {
            X = handDepthPoint.X,
            Y = handDepthPoint.Y,
            Z = handDepthPoint.Depth,
            T = DateTime.Now
        };
        // Depth-space position of the head.
        DepthImagePoint headDepthPoint = depthFrame.MapFromSkeletonPoint(first.Joints[JointType.Head].Position);
        var head = new HeadPoint()
        {
            X = headDepthPoint.X,
            Y = headDepthPoint.Y,
            Z = headDepthPoint.Depth,
            T = DateTime.Now
        };

        // The user must stand in the right place before anything starts:
        // outside the 1.7m-2m box, hide the status label and ignore the frame.
        if (head.Z < 1700 || head.Z > 2000)
        {
            StatusLabel.Visibility = System.Windows.Visibility.Hidden;
            StatusLabel.Content = "";
            return;
        }
        StatusLabel.Visibility = System.Windows.Visibility.Visible;
        StatusLabel.Content = "Control Mode(1.7m~2m): " + head.Z / 1000 + "m";

        // Left hand above head height counts as a wave-to-quit candidate
        // (smaller Y means higher on screen).
        if (leftHand.Y < head.Y)
        {
            LeftHandWave(leftHand, head);
        }
        else
        {
            IsLeftHandWave = false;
        }
    }
}
示例13: kinect_AllFramesReady
/// <summary>
/// Paints the RGB image onto the canvas background (optionally gray-scaling
/// pixels beyond 2000 mm) and redraws the user's skeleton.
/// </summary>
private void kinect_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    // FIX: Kinect frames are IDisposable and must be released; the original
    // never disposed the color, depth or skeleton frame.
    byte[] imagem;
    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        imagem = ObterImagemSensorRGB(colorFrame);
    }
    if (chkEscalaCinza.IsChecked.HasValue && chkEscalaCinza.IsChecked.Value)
    {
        using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
        {
            ReconhecerDistancia(depthFrame, imagem, 2000);
        }
    }
    if (imagem != null)
        canvasKinect.Background = new ImageBrush(BitmapSource.Create(kinect.ColorStream.FrameWidth, kinect.ColorStream.FrameHeight,
            96, 96, PixelFormats.Bgr32, null, imagem,
            kinect.ColorStream.FrameWidth * kinect.ColorStream.FrameBytesPerPixel));
    canvasKinect.Children.Clear();
    using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
    {
        DesenharEsqueletoUsuario(skeletonFrame);
    }
}
开发者ID:gilgaljunior,项目名称:CrieAplicacoesInterativascomoMicrosoftKinect,代码行数:16,代码来源:MainWindow.xaml.cs
示例14: kinect_AllFramesReady
// Receives the update notification for all frame types and shows the RGB and
// depth camera images. Each frame is IDisposable, hence the using blocks.
void kinect_AllFramesReady( object sender, AllFramesReadyEventArgs e )
{
    using ( var colorFrame = e.OpenColorImageFrame() ) {
        if ( colorFrame != null )
            imageRgbCamera.Source = colorFrame.ToBitmapSource();
    }
    using ( var depthFrame = e.OpenDepthImageFrame() ) {
        if ( depthFrame != null )
            imageDepthCamera.Source = depthFrame.ToBitmapSource();
    }
}
示例15: newSensor_AllFramesReady
/// <summary>
/// Converts each depth frame into a colored Bgr32 bitmap via
/// GenerateColoredBytes and displays it in image1.
/// </summary>
void newSensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        // The runtime may hand us a null frame when we fell behind.
        if (depthFrame == null) return;

        byte[] coloredPixels = GenerateColoredBytes(depthFrame);
        // Bgr32 is 4 bytes per pixel, so the stride is Width * 4.
        image1.Source = BitmapSource.Create(
            depthFrame.Width, depthFrame.Height, 96, 96,
            PixelFormats.Bgr32, null, coloredPixels, depthFrame.Width * 4);
    }
}