本文整理汇总了C#中ColorImagePoint类的典型用法代码示例。如果您正苦于以下问题:C# ColorImagePoint类的具体用法?C# ColorImagePoint怎么用?C# ColorImagePoint使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ColorImagePoint类属于命名空间,在下文中一共展示了ColorImagePoint类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: adjustHandPosition
/// <summary>
/// Sizes a hand indicator to half the shoulder width and centers it on the
/// hand's color-image coordinates.
/// </summary>
/// <param name="element">Canvas child visualizing the hand.</param>
/// <param name="hand">Hand joint mapped into color-image coordinates.</param>
/// <param name="half_shoulder_width">Half the shoulder width, used as the indicator's side length.</param>
private void adjustHandPosition(FrameworkElement element, ColorImagePoint hand, Double half_shoulder_width)
{
    // Make the indicator a square whose side is half the shoulder width.
    element.Width = half_shoulder_width;
    element.Height = half_shoulder_width;

    // Offset by half the element's size so the point lands at its center.
    double left = hand.X - (element.Width / 2);
    double top = hand.Y - (element.Height / 2);
    Canvas.SetLeft(element, left);
    Canvas.SetTop(element, top);
}
示例2: CameraPosition
/// <summary>
/// Centers a UI element on the given color-image coordinate.
/// </summary>
/// <param name="element">Canvas child to position.</param>
/// <param name="point">Target point in color-image coordinates.</param>
private void CameraPosition(FrameworkElement element, ColorImagePoint point)
{
    // Shift by half the element's size so the coordinate sits at the
    // element's center rather than its top-left corner.
    double centeredLeft = point.X - (element.Width / 2);
    double centeredTop = point.Y - (element.Height / 2);
    Canvas.SetLeft(element, centeredLeft);
    Canvas.SetTop(element, centeredTop);
}
示例3: ConvertDepthColor
/// <summary>
/// Converts depth-frame data into a BGR32 color image: player pixels are
/// drawn white; non-player pixels are color-coded by depth validity.
/// </summary>
/// <param name="kinect">Sensor whose color/depth streams describe the frame formats.</param>
/// <param name="depthFrame">Depth frame to convert.</param>
/// <returns>BGR32 pixel buffer (PixelDataLength * Bgr32BytesPerPixel bytes).</returns>
private byte[] ConvertDepthColor( KinectSensor kinect, DepthImageFrame depthFrame )
{
ColorImageStream colorStream = kinect.ColorStream;
DepthImageStream depthStream = kinect.DepthStream;
// Get the raw per-pixel data of the depth camera.
short[] depthPixel = new short[depthFrame.PixelDataLength];
depthFrame.CopyPixelDataTo( depthPixel );
// Get, for each depth-camera pixel, the corresponding RGB-camera
// coordinate (registration between the two cameras).
ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
colorStream.Format, colorPoint );
byte[] depthColor = new byte[depthFrame.PixelDataLength * Bgr32BytesPerPixel];
for ( int index = 0; index < depthPixel.Length; index++ ) {
// Unpack the player ID and the distance from the packed depth value.
int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
int distance = depthPixel[index] >> DepthImageFrame.PlayerIndexBitmaskWidth;
// The mapped coordinate can exceed the frame size; clamp to the edge.
int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );
// NOTE(review): the row stride uses depthFrame.Width while x/y are clamped
// against the color stream's size — fine when both streams share a
// resolution; verify if the formats ever differ.
int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;
// Player pixel: white (B,G,R) = (255,255,255).
if ( player != 0 ) {
depthColor[colorIndex] = 255;
depthColor[colorIndex + 1] = 255;
depthColor[colorIndex + 2] = 255;
}
else {
// Out of supported range (0-40cm): red.
if ( distance == depthStream.UnknownDepth ) {
depthColor[colorIndex] = 0;
depthColor[colorIndex + 1] = 0;
depthColor[colorIndex + 2] = 255;
}
// Too near, 40cm-80cm (default mode): green.
else if ( distance == depthStream.TooNearDepth ) {
depthColor[colorIndex] = 0;
depthColor[colorIndex + 1] = 255;
depthColor[colorIndex + 2] = 0;
}
// Too far, 3m(Near)/4m(Default)-8m: blue.
else if ( distance == depthStream.TooFarDepth ) {
depthColor[colorIndex] = 255;
depthColor[colorIndex + 1] = 0;
depthColor[colorIndex + 2] = 0;
}
// Valid distance data: yellow.
else {
depthColor[colorIndex] = 0;
depthColor[colorIndex + 1] = 255;
depthColor[colorIndex + 2] = 255;
}
}
}
return depthColor;
}
示例4: AddLine
/// <summary>
/// Draws a 1px black line segment between two color-image points on the canvas.
/// </summary>
/// <param name="p1">Start point in color-image coordinates.</param>
/// <param name="p2">End point in color-image coordinates.</param>
private void AddLine(ColorImagePoint p1, ColorImagePoint p2)
{
    var segment = new Line
    {
        Stroke = System.Windows.Media.Brushes.Black,
        StrokeThickness = 1,
        X1 = p1.X,
        Y1 = p1.Y,
        X2 = p2.X,
        Y2 = p2.Y,
    };
    cvs.Children.Add(segment);
}
示例5: CameraPosition
/// <summary>
/// Centers the element on the tracked point, then interprets the resulting
/// position: the right ellipse crossing the click border registers a "click"
/// on the box selected by greenIndex; the left ellipse crossing the roll
/// border starts/stops the roll timer.
/// </summary>
/// <param name="element">Canvas child being tracked (identified by Name).</param>
/// <param name="point">Tracked point in color-image coordinates.</param>
private void CameraPosition(FrameworkElement element, ColorImagePoint point)
{
    // Center the element on the point (offset by half its size).
    Canvas.SetLeft(element, point.X - element.Width / 2);
    Canvas.SetTop(element, point.Y - element.Height / 2);

    string name = element.Name;
    if (name.Equals("rightEllipse"))
    {
        // Right hand past the border counts as a click on the box
        // currently indicated by greenIndex.
        if (Canvas.GetLeft(element) > clickLeftBorder)
        {
            Console.WriteLine("You clicked");
            if (greenIndex == 0)
            {
                clickLabel.Content = "Bottom box clicked";
            }
            else if (greenIndex == 1)
            {
                clickLabel.Content = "Top box clicked";
            }
            else if (greenIndex == 2)
            {
                clickLabel.Content = "Middle box clicked";
            }
        }
    }
    else if (name.Equals("leftEllipse"))
    {
        bool insideRollZone = Canvas.GetLeft(element) < rollRightBorder;
        if (insideRollZone)
        {
            Console.WriteLine("You be rollin");
            // Start the timer only on the transition into the zone.
            if (!rolling)
            {
                rollTimer.Start();
                rolling = true;
            }
        }
        else if (rolling)
        {
            // Left the zone: stop rolling.
            rollTimer.Stop();
            rolling = false;
        }
    }
}
示例6: BackgroundMask
/// <summary>
/// Shows only the player: builds an output image that is white everywhere
/// except at pixels where a player was detected, which take the RGB
/// camera's color.
/// </summary>
/// <param name="kinect">Sensor whose color/depth streams describe the frame formats.</param>
/// <param name="colorFrame">Current RGB frame.</param>
/// <param name="depthFrame">Current depth frame.</param>
/// <returns>BGR32 pixel buffer the size of the color frame's data.</returns>
private byte[] BackgroundMask( KinectSensor kinect,
ColorImageFrame colorFrame, DepthImageFrame depthFrame )
{
ColorImageStream colorStream = kinect.ColorStream;
DepthImageStream depthStream = kinect.DepthStream;
// Get the raw per-pixel data of the RGB camera.
byte[] colorPixel = new byte[colorFrame.PixelDataLength];
colorFrame.CopyPixelDataTo( colorPixel );
// Get the raw per-pixel data of the depth camera.
short[] depthPixel = new short[depthFrame.PixelDataLength];
depthFrame.CopyPixelDataTo( depthPixel );
// Get, for each depth-camera pixel, the corresponding RGB-camera
// coordinate (registration between the two cameras).
ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
colorStream.Format, colorPoint );
// Output buffer, initialized to white (255,255,255).
byte[] outputColor = new byte[colorPixel.Length];
for ( int i = 0; i < outputColor.Length; i += Bgr32BytesPerPixel ) {
outputColor[i] = 255;
outputColor[i + 1] = 255;
outputColor[i + 2] = 255;
}
for ( int index = 0; index < depthPixel.Length; index++ ) {
// Extract the player index (0 = no player at this pixel).
int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
// The mapped coordinate can exceed the frame size; clamp to the edge.
int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );
int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;
// Copy the RGB camera's pixel only where a player was detected.
if ( player != 0 ) {
outputColor[colorIndex] = colorPixel[colorIndex];
outputColor[colorIndex + 1] = colorPixel[colorIndex + 1];
outputColor[colorIndex + 2] = colorPixel[colorIndex + 2];
}
}
return outputColor;
}
示例7: CheckFacePosition
/// <summary>
/// Detects the face position around the skeleton's head point using an
/// OpenCV Haar cascade on a grayscale crop of the current output image.
/// </summary>
/// <param name="headPosition">Head position of the skeleton, in color-image coordinates.</param>
/// <returns>Face rectangle; falls back to the head position with image2's size when no face is found.</returns>
private Rect CheckFacePosition( ColorImagePoint headPosition )
{
// Size of the region to crop around the head.
int snipWidth = 200;
int snipHeight = 200;
// Result rect (defaults to the skeleton head position and image2's size).
Rect reRect = new Rect(headPosition.X, headPosition.Y,
image2.Width, image2.Height);
storage.Clear();
openCVGrayImage.ResetROI(); // the ROI is occasionally still set on entry, so reset it
openCVImage.CopyFrom( outputImage ); // WriteableBitmap -> IplImage
Cv.CvtColor( openCVImage, openCVGrayImage, ColorConversion.BgrToGray ); // convert to grayscale
Cv.EqualizeHist( openCVGrayImage, openCVGrayImage ); // histogram equalization
// Face detection.
try {
// Crop the image around the head position.
var snipImage = SnipFaceImage( openCVGrayImage, headPosition, snipWidth, snipHeight );
if ( snipImage != null ) {
CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects( snipImage, cascade, storage );
// A face was found: translate the crop-local rect back to image coordinates.
if ( faces.Total > 0 ) {
reRect.X = faces[0].Value.Rect.X + (headPosition.X - snipWidth / 2);
reRect.Y = faces[0].Value.Rect.Y + (headPosition.Y - snipHeight / 2);
reRect.Width = faces[0].Value.Rect.Width;
reRect.Height = faces[0].Value.Rect.Height;
}
}
}
// NOTE(review): deliberately best-effort — any OpenCV failure falls back to
// the default rect; consider at least logging the exception.
catch ( Exception ) { }
return reRect;
}
示例8: IsSteady
/// <summary>
/// Checks whether the tracked point has stayed within a movement threshold
/// of the base point for the configured duration.
/// </summary>
/// <param name="skeletonFrame">Frame providing the timestamp for the sample.</param>
/// <param name="point">Current point in color-image coordinates.</param>
/// <returns>true once the point has stayed near basePoint for 'milliseconds'; otherwise false.</returns>
bool IsSteady( SkeletonFrame skeletonFrame, ColorImagePoint point )
{
var currentPoint = new FramePoint()
{
Point = point,
TimeStamp = skeletonFrame.Timestamp,
};
// Steady once 'milliseconds' have elapsed since the base point was set;
// the base point is also advanced so the next detection starts fresh.
if ( (currentPoint.TimeStamp - basePoint.TimeStamp) > milliseconds ) {
basePoint = currentPoint;
return true;
}
// If either coordinate moved by more than 'threshold', restart the
// measurement from the new position.
if ( Math.Abs( currentPoint.Point.X - basePoint.Point.X ) > threshold
|| Math.Abs( currentPoint.Point.Y - basePoint.Point.Y ) > threshold ) {
// The point moved: shift the base point and measure from scratch.
basePoint = currentPoint;
}
return false;
}
示例9: IsKnownPoint
/// <summary>
/// Tests whether the ColorImagePoint has a known value.
/// </summary>
/// <param name="colorImagePoint">The ColorImagePoint to test.</param>
/// <returns>true if the ColorImagePoint has a known value; otherwise false.</returns>
// NOTE(review): declaration only — the method body is not shown in this excerpt.
public static bool IsKnownPoint(ColorImagePoint colorImagePoint);
示例10: DepthToColorCallback
/// <summary>
/// Callback to help with mapping depth pixel to color pixel data. Uses Kinect sensor's MapDepthToColorImagePoint to
/// do the conversion.
/// </summary>
/// <param name="depthFrameWidth">Depth frame width (part of the callback contract; not read here).</param>
/// <param name="depthFrameHeight">Depth frame height (part of the callback contract; not read here).</param>
/// <param name="colorFrameWidth">Color frame width (part of the callback contract; not read here).</param>
/// <param name="colorFrameHeight">Color frame height (part of the callback contract; not read here).</param>
/// <param name="zoomFactor">Zoom factor (part of the callback contract; not read here).</param>
/// <param name="viewOffset">View offset (part of the callback contract; not read here).</param>
/// <param name="depthX">X coordinate of the depth pixel to map.</param>
/// <param name="depthY">Y coordinate of the depth pixel to map.</param>
/// <param name="depthZ">Depth value of the pixel to map.</param>
/// <param name="colorX">Receives the mapped color X coordinate (0 on failure).</param>
/// <param name="colorY">Receives the mapped color Y coordinate (0 on failure).</param>
/// <returns>
/// The depth to color callback: 0 on success, -1 when the sensor is missing
/// or the mapping call throws.
/// </returns>
private int DepthToColorCallback(
uint depthFrameWidth,
uint depthFrameHeight,
uint colorFrameWidth,
uint colorFrameHeight,
float zoomFactor,
Point viewOffset,
int depthX,
int depthY,
ushort depthZ,
out int colorX,
out int colorY)
{
int retCode = 0;
// Default the out parameters so every exit path leaves them assigned.
colorX = 0;
colorY = 0;
if (this.sensor != null)
{
var colorPoint = new ColorImagePoint();
try
{
DepthImagePoint depthImagePoint = new DepthImagePoint()
{
X = depthX,
Y = depthY,
Depth = depthZ,
};
colorPoint = this.sensor.CoordinateMapper.MapDepthPointToColorPoint(
this.sensor.DepthStream.Format,
depthImagePoint,
this.sensor.ColorStream.Format);
}
catch (InvalidOperationException e)
{
// Mapping failed: trace the offending depth point and report failure,
// but still fall through and return the (default) colorPoint coordinates.
string traceStr = string.Format(
CultureInfo.CurrentCulture,
"Exception on MapDepthToColorImagePoint while translating depth point({0},{1},{2}). Exception={3}",
depthX,
depthY,
depthZ,
e.Message);
Trace.WriteLineIf(this.traceLevel >= TraceLevel.Error, traceStr, TraceCategory);
retCode = -1;
}
colorX = colorPoint.X;
colorY = colorPoint.Y;
}
else
{
// No sensor available — nothing to map.
retCode = -1;
}
return retCode;
}
示例11: ConvertDepthColor
/// <summary>
/// Converts depth-frame data into a BGR32 color image, painting only the
/// pixels of enabled players with that player's assigned color; everything
/// else stays zeroed (black).
/// </summary>
/// <param name="kinect">Sensor whose color/depth streams describe the frame formats.</param>
/// <param name="depthFrame">Depth frame to convert.</param>
/// <returns>BGR32 pixel buffer (PixelDataLength * Bgr32BytesPerPixel bytes).</returns>
private byte[] ConvertDepthColor( KinectSensor kinect, DepthImageFrame depthFrame )
{
ColorImageStream colorStream = kinect.ColorStream;
DepthImageStream depthStream = kinect.DepthStream;
// Get the raw per-pixel data of the depth camera.
short[] depthPixel = new short[depthFrame.PixelDataLength];
depthFrame.CopyPixelDataTo( depthPixel );
// Get, for each depth-camera pixel, the corresponding RGB-camera
// coordinate (registration between the two cameras).
ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
colorStream.Format, colorPoint );
byte[] depthColor = new byte[depthFrame.PixelDataLength * Bgr32BytesPerPixel];
for ( int index = 0; index < depthPixel.Length; index++ ) {
// Unpack the player ID and the distance from the packed depth value.
int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
int distance = depthPixel[index] >> DepthImageFrame.PlayerIndexBitmaskWidth;
// The mapped coordinate can exceed the frame size; clamp to the edge.
int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );
int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;
// Pixel belongs to a player?
if ( player != 0 ) {
// Color only the players that are enabled.
if ( enablePlayer[player] ) {
depthColor[colorIndex] = playerColor[player].B;
depthColor[colorIndex + 1] = playerColor[player].G;
depthColor[colorIndex + 2] = playerColor[player].R;
}
}
}
return depthColor;
}
示例12: CameraPosition
/// <summary>
/// Centers a UI element on the given color-image coordinate.
/// </summary>
/// <param name="element">Canvas child to position.</param>
/// <param name="point">Target point in color-image coordinates.</param>
private void CameraPosition(FrameworkElement element, ColorImagePoint point)
{
    // Removed a leftover Console.Out.WriteLine(point.X.ToString()) debug
    // trace: this method runs per tracked frame and the write spams stdout.
    // Offset by half the element's size so the point sits at its center.
    Canvas.SetLeft(element, point.X - element.Width / 2);
    Canvas.SetTop(element, point.Y - element.Height / 2);
}
示例13: SensorAllFramesReady
private byte[][] SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
bool depthReceived = false;
bool colorReceived = false;
DepthImagePixel[] depthPixels;
byte[] colorPixels;
ColorImagePoint[] colorCoordinates;
int colorToDepthDivisor;
byte[] greenScreenPixelData;
// Allocate space to put the color pixels we'll create
depthPixels = new DepthImagePixel[this.kinectSensor.DepthStream.FramePixelDataLength];
colorPixels = new byte[this.kinectSensor.ColorStream.FramePixelDataLength];
greenScreenPixelData = new byte[this.kinectSensor.DepthStream.FramePixelDataLength];
colorCoordinates = new ColorImagePoint[this.kinectSensor.DepthStream.FramePixelDataLength];
int colorWidth = this.kinectSensor.ColorStream.FrameWidth;
int colorHeight = this.kinectSensor.ColorStream.FrameHeight;
colorToDepthDivisor = colorWidth / 640;
byte[][] results = new byte[2][]; // kinectSensor.DepthStream.FramePixelDataLength];
DepthImageFormat DepthFormat = DepthImageFormat.Resolution640x480Fps30;
ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (null != depthFrame)
{
// Copy the pixel data from the image to a temporary array
depthFrame.CopyDepthImagePixelDataTo(depthPixels);
depthReceived = true;
}
}
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (null != colorFrame)
{
// Copy the pixel data from the image to a temporary array
this.outputColorBitmap = new WriteableBitmap(640, 480, 96, 96, PixelFormats.Bgr32, null);
colorFrame.CopyPixelDataTo(colorPixels);
colorReceived = true;
}
}
if (true == depthReceived)
{
this.kinectSensor.CoordinateMapper.MapDepthFrameToColorFrame(
DepthFormat,
depthPixels,
ColorFormat,
colorCoordinates);
Array.Clear(greenScreenPixelData, 0, greenScreenPixelData.Length);
// loop over each row and column of the depth
for (int y = 0; y < 480; ++y)
{
for (int x = 0; x < 640; ++x)
{
// calculate index into depth array
int depthIndex = x + (y * 640);
DepthImagePixel depthPixel = depthPixels[depthIndex];
int player = depthPixel.PlayerIndex;
// if we're tracking a player for the current pixel, do green screen
if (player > 0)
{
// retrieve the depth to color mapping for the current depth pixel
ColorImagePoint colorImagePoint = colorCoordinates[depthIndex];
// scale color coordinates to depth resolution
int colorInDepthX = colorImagePoint.X / colorToDepthDivisor;
int colorInDepthY = colorImagePoint.Y / colorToDepthDivisor;
// make sure the depth pixel maps to a valid point in color space
if (colorInDepthX > 0 && colorInDepthX < 640 && colorInDepthY >= 0 && colorInDepthY < 480)
{
// calculate index into the green screen pixel array
int greenScreenIndex = colorInDepthX + (colorInDepthY * 640);
// set opaque
greenScreenPixelData[greenScreenIndex] = 33;
// compensate for depth/color not corresponding exactly by setting the pixel
// to the left to opaque as well
greenScreenPixelData[greenScreenIndex - 1] = 33;
}
}
}
}
}
if (true == colorReceived)
{
// Write the pixel data into our bitmap
//.........这里部分代码省略.........
示例14: SaveBuffer
/// <summary>
/// Saves the current Kinect frame into ring buffers, separating the image
/// into a "player" layer and a "room" (background) layer using the depth
/// stream's per-pixel player index.
/// </summary>
/// <param name="colorFrame">Current RGB frame.</param>
/// <param name="depthFrame">Current depth frame.</param>
/// <param name="skeletonFrame">Current skeleton frame (only null-checked here).</param>
private void SaveBuffer(ColorImageFrame colorFrame, DepthImageFrame depthFrame, SkeletonFrame skeletonFrame)
{
if (kinectDevice == null || depthFrame == null || colorFrame == null || skeletonFrame == null) return;
ColorImageStream colorStream = kinectDevice.ColorStream;
DepthImageStream depthStream = kinectDevice.DepthStream;
screenImageStride = kinectDevice.DepthStream.FrameWidth * colorFrame.BytesPerPixel;
int colorStride = colorFrame.BytesPerPixel * colorFrame.Width; // 4 x image width
int ImageIndex = 0;
depthFrame.CopyPixelDataTo(_depthPixelData);
colorFrame.CopyPixelDataTo(_colorPixelData);
// Map each depth pixel to its RGB-camera coordinate (registration).
// BUG FIX: the original passed a freshly allocated, all-zero short[] as the
// depth data here, so the mapping was computed from zero depths. Pass the
// real depth data that was just copied into _depthPixelData instead.
ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
kinectDevice.MapDepthFrameToColorFrame(depthFrame.Format, _depthPixelData, colorFrame.Format, colorPoint);
byte[] byteRoom = new byte[depthFrame.Height * screenImageStride];
byte[] bytePlayer = new byte[depthFrame.Height * screenImageStride];
double[] depth = new double[depthFrame.Height * screenImageStride];
int[] playerIndexArray = new int[depthFrame.Height * screenImageStride];
for (int depthY = 0; depthY < depthFrame.Height; depthY++)
{
for (int depthX = 0; depthX < depthFrame.Width; depthX++, ImageIndex += colorFrame.BytesPerPixel)
{
int depthPixelIndex = depthX + (depthY * depthFrame.Width);
// Player ID at this depth pixel (0 = no player).
int playerIndex = _depthPixelData[depthPixelIndex] & DepthImageFrame.PlayerIndexBitmask;
// The mapped coordinate can exceed the color frame; clamp to the edge.
int x = Math.Min(colorPoint[depthPixelIndex].X, colorStream.FrameWidth - 1);
int y = Math.Min(colorPoint[depthPixelIndex].Y, colorStream.FrameHeight - 1);
int colorPixelIndex = (x * colorFrame.BytesPerPixel) + (y * colorStride);
if (playerIndex != 0)
{
bytePlayer[ImageIndex] = _colorPixelData[colorPixelIndex]; //Blue
bytePlayer[ImageIndex + 1] = _colorPixelData[colorPixelIndex + 1]; //Green
bytePlayer[ImageIndex + 2] = _colorPixelData[colorPixelIndex + 2]; //Red
bytePlayer[ImageIndex + 3] = 0xFF; //Alpha
// Record the pixel's depth (strip the player-index bits).
depth[ImageIndex] = _depthPixelData[depthPixelIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;
playerIndexArray[ImageIndex] = playerIndex;
}
else
{
byteRoom[ImageIndex] = _colorPixelData[colorPixelIndex]; //Blue
byteRoom[ImageIndex + 1] = _colorPixelData[colorPixelIndex + 1]; //Green
byteRoom[ImageIndex + 2] = _colorPixelData[colorPixelIndex + 2]; //Red
byteRoom[ImageIndex + 3] = 0xFF; //Alpha
}
}
}
// Save the player data into the ring buffer.
ringbuf.save_framedata(ref bytePlayer);
//ringbuf.save_depthdata(depth);
ringbuf.save_playerIndexdata(playerIndexArray);
ringbuf.set_nextframe();
// Write out to bitmap (kept from the original, disabled):
//_room_bitmap.WritePixels(_screenImageRect, byteRoom, screenImageStride, 0);
//room_image.Source = _room_bitmap;
RenderScreen2();
}
示例15: MoveToCameraPosition
/// <summary>
/// Places the element's top-left corner at the given color-image coordinate
/// (no centering offset, unlike CameraPosition).
/// </summary>
/// <param name="element">Canvas child to position.</param>
/// <param name="point">Target point in color-image coordinates.</param>
private void MoveToCameraPosition(FrameworkElement element, ColorImagePoint point)
{
    double left = point.X;
    double top = point.Y;
    Canvas.SetLeft(element, left);
    Canvas.SetTop(element, top);
}