This page collects typical usage examples of the C++ method Kinect::Skeleton. If you have been wondering how to use Kinect::Skeleton in C++, or what a concrete call looks like, the curated examples below may help. You can also browse further usage examples of the containing class, Kinect.
Four code examples of the Kinect::Skeleton method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code samples.
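All four examples share the same call pattern around Kinect::Skeleton: obtain the engine, Enable() it, then pull a SkeletonFrame on every loop iteration. Here is a minimal sketch of just that pattern, stripped of the OpenCV code (the header name is an assumption; the wrapper used in these examples is the kinect::nui C++ wrapper for the Kinect SDK 1.x):
// Minimal skeleton-tracking loop (sketch, not from the examples below)
#include "kinect.h"   // header name for the kinect::nui wrapper is an assumption
int main(){
    kinect::nui::Kinect kinect;
    kinect.Initialize( NUI_INITIALIZE_FLAG_USES_SKELETON );
    // Kinect::Skeleton() returns the skeleton engine, which must be enabled before use
    kinect::nui::SkeletonEngine& skeleton = kinect.Skeleton();
    skeleton.Enable();
    while ( 1 ) {
        kinect.WaitAndUpdateAll();  // block until fresh data is available
        kinect::nui::SkeletonFrame skeletonframe = skeleton.GetNextFrame();
        // ... read tracked joints out of skeletonframe ...
    }
    return 0;
}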
Example 1: main
int main(int argc, char *argv[]){
#pragma region // --- init ---
    // Declare and initialize the Kinect class
    Kinect kinect;
    kinect.Initialize( NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX );
    // Create the streams: one for the color image, one for depth
    ImageStream& video = kinect.VideoStream();
    video.Open( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480 );
    ImageStream& depth = kinect.DepthStream();
    depth.Open( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240 );
    // Prepare to use the skeleton engine
    kinect::nui::SkeletonEngine& skeleton = kinect.Skeleton();
    skeleton.Enable();
    // Prepare the OpenCV Mats and windows
    namedWindow("camera_window");
    Mat camera_img = Mat(Size(video.Width(), video.Height()), CV_8UC4);
    namedWindow("depth_window");
    Mat depth_img = Mat(Size(depth.Width(), depth.Height()), CV_16UC1); // the depth stream appears to be 16-bit
    // Registration of the depth and color images (kinect_utility.h)
    ColorFromDepthEngine CFDengine;
#pragma endregion
#pragma region // --- my init ---
    mine::flag skeleton_flag;
#pragma endregion
    while ( 1 ) {
        #pragma region // --- get data ---
        // Wait for the data to update
        kinect.WaitAndUpdateAll();
        // Grab the skeleton frame
        kinect::nui::SkeletonFrame skeletonframe = skeleton.GetNextFrame();
        // Fetch the next frame's data (OpenNI style)
        ImageFrame image( video );
        DepthFrame depthMD( depth );
        // Copy the data into the cv::Mats
        camera_img = Mat(camera_img.size(), CV_8UC4, (BYTE *)image.Bits());
        depth_img = Mat(depth_img.size(), CV_16UC1, (BYTE *)depthMD.Bits());
        // Align the color image to the depth image with the utility
        cv::Mat adjusted_camera_img(depth_img.size(), CV_8UC4);
        adjusted_camera_img = CFDengine.getAdjustedImage(camera_img);
        #pragma endregion
        #pragma region // --- processing ---
        SkeletonDrawer skeletondrawer(skeletonframe);
        SkeletonPoints me = skeletondrawer.me(depth_img.cols, depth_img.rows); // get the skeleton in depth-image coordinates
        if(skeleton_flag.IsTrue){
            me.Drawall(adjusted_camera_img);
        }
        if(me.IsTracked){ // if a person is in view
            Point prhand = me.getPoint(NUI_SKELETON_POSITION_HAND_RIGHT);
            Point plhand = me.getPoint(NUI_SKELETON_POSITION_HAND_LEFT);
            // Choose the colors
            Scalar color_rhand = Scalar(0, 0, 255);
            Scalar color_lhand = Scalar(0, 255, 0);
            // If both hands are at the same position and depth, mix the two colors
            if(norm(prhand - plhand) < THRESH_SAMEPOS){
                ushort drhand = depth_img.at<ushort>(prhand);
                ushort dlhand = depth_img.at<ushort>(plhand);
                if(abs(drhand - dlhand) < THRESH_SAMEDEPTH){
                    Scalar mix = color_lhand;
                    mix += color_rhand;
                    color_rhand = mix;
                    color_lhand = mix;
                }
            }
            // Paint onto the image
            circle(adjusted_camera_img, prhand, 5, color_rhand, -1);
            circle(adjusted_camera_img, plhand, 5, color_lhand, -1);
        }
        #pragma endregion
        #pragma region // --- show ---
        cv::resize(adjusted_camera_img, camera_img, camera_img.size());
        cv::imshow("camera_window", camera_img);
        #pragma endregion
        #pragma region // --- keyboard callback ---
        int key = waitKey(1);
        if ( key == 'q' ) {
            break;
        }
        else if ( key == 's' ) {
            skeleton_flag.reverse(); // toggle skeleton drawing on/off
//......... remainder of this example omitted .........
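Example 1 (and Example 3 below) uses mine::flag, a helper whose definition is not included in the excerpt. Its usage (skeleton_flag.IsTrue, skeleton_flag.reverse()) implies nothing more than a boolean toggle; a hypothetical stand-in for readers who want to compile the examples:
// Hypothetical stand-in for the author's mine::flag (the real header is not shown)
namespace mine {
    struct flag {
        bool IsTrue;
        flag() : IsTrue(false) {}              // assume drawing starts disabled
        void reverse() { IsTrue = !IsTrue; }   // flipped on each 's' key press
    };
}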
Example 2: main
int main(int argc, char *argv[]) {
#pragma region // --- init ---
    // Declare and initialize the Kinect class
    Kinect kinect;
    kinect.Initialize( NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX );
    // Create the streams: one for the color image, one for depth
    ImageStream& video = kinect.VideoStream();
    video.Open( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480 );
    ImageStream& depth = kinect.DepthStream();
    depth.Open( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240 );
    // Prepare to use the skeleton engine
    kinect::nui::SkeletonEngine& skeleton = kinect.Skeleton();
    skeleton.Enable();
    // Prepare the OpenCV Mats and windows
    namedWindow("camera_window");
    Mat camera_img = Mat(Size(video.Width(), video.Height()), CV_8UC4);
    namedWindow("depth_window");
    Mat depth_img = Mat(Size(depth.Width(), depth.Height()), CV_16UC1); // the depth stream appears to be 16-bit
    // Registration of the depth and color images (kinect_utility.h)
    ColorFromDepthEngine CFDengine;
#pragma endregion
    while ( 1 ) {
        #pragma region // --- get data ---
        // Wait for the data to update
        kinect.WaitAndUpdateAll();
        // Grab the skeleton frame
        kinect::nui::SkeletonFrame skeletonframe = skeleton.GetNextFrame();
        // Fetch the next frame's data (OpenNI style)
        ImageFrame image( video );
        DepthFrame depthMD( depth );
        // Copy the data into the cv::Mats
        camera_img = Mat(camera_img.size(), CV_8UC4, (BYTE *)image.Bits());
        depth_img = Mat(depth_img.size(), CV_16UC1, (BYTE *)depthMD.Bits());
        // Align the color image to the depth image with the utility
        cv::Mat adjusted_camera_img(depth_img.size(), CV_8UC4);
        adjusted_camera_img = CFDengine.getAdjustedImage(camera_img);
        #pragma endregion
        #pragma region // --- processing ---
        SkeletonDrawer skeletondrawer(skeletonframe);
        SkeletonPoints me = skeletondrawer.me(depth_img.cols, depth_img.rows); // get the skeleton in depth-image coordinates
        if(me.IsTracked) { // if a person is in view
            me.Drawall(adjusted_camera_img); // draw the skeleton
            // Left and right hands
            circle(adjusted_camera_img, me.getPoint(NUI_SKELETON_POSITION_HAND_LEFT), 5, cv::Scalar(0,0,255), -1);
            circle(adjusted_camera_img, me.getPoint(NUI_SKELETON_POSITION_HAND_RIGHT), 5, cv::Scalar(0,255,0), -1);
            // Log the skeleton z coordinate followed by the raw depth value at the right hand
            std::cout << "z" << (int)me.get3dPoint(NUI_SKELETON_POSITION_HAND_RIGHT)[2]
                      << depth_img.at<ushort>(me.getPoint(NUI_SKELETON_POSITION_HAND_RIGHT)) << std::endl;
        }
        #pragma endregion
        #pragma region // --- show ---
        cv::resize(adjusted_camera_img, camera_img, camera_img.size());
        cv::imshow("camera_window", camera_img);
        #pragma endregion
        #pragma region // --- keyboard callback ---
        int key = waitKey(1);
        if ( key == 'q' ) {
            break;
        }
        #pragma endregion
    }
    return 0; // normal exit
}
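Every example delegates depth-to-color registration to ColorFromDepthEngine::getAdjustedImage from the author's kinect_utility.h, which is not shown. Below is a sketch of what such a routine might do; the internals are an assumption, but the mapping call, NuiImageGetColorPixelCoordinatesFromDepthPixel, is a real Kinect SDK 1.x API that converts a 320x240 depth pixel into 640x480 color-image coordinates:
// Sketch of a possible ColorFromDepthEngine::getAdjustedImage implementation
#include <Windows.h>
#include <NuiApi.h>
#include <opencv2/opencv.hpp>
cv::Mat adjustColorToDepth(const cv::Mat& color /*CV_8UC4, 640x480*/,
                           const cv::Mat& depth /*CV_16UC1, packed depth + player index*/){
    cv::Mat out(depth.size(), CV_8UC4, cv::Scalar(0));
    for(int y = 0; y < depth.rows; y++){
        for(int x = 0; x < depth.cols; x++){
            LONG cx = 0, cy = 0;
            // Map this depth pixel to the matching color pixel; the depth value is
            // the raw packed value from the DEPTH_AND_PLAYER_INDEX stream
            NuiImageGetColorPixelCoordinatesFromDepthPixel(
                NUI_IMAGE_RESOLUTION_640x480, NULL,
                x, y, depth.at<ushort>(y, x), &cx, &cy);
            if(cx >= 0 && cx < color.cols && cy >= 0 && cy < color.rows){
                out.at<cv::Vec4b>(y, x) = color.at<cv::Vec4b>(cy, cx);
            }
        }
    }
    return out;
}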
Example 3: main
int main(int argc, char *argv[]) {
#pragma region // --- init ---
    // Declare and initialize the Kinect class
    Kinect kinect;
    kinect.Initialize( NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX );
    // Create the streams: one for the color image, one for depth
    ImageStream& video = kinect.VideoStream();
    video.Open( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480 );
    ImageStream& depth = kinect.DepthStream();
    depth.Open( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240 );
    // Prepare to use the skeleton engine
    kinect::nui::SkeletonEngine& skeleton = kinect.Skeleton();
    skeleton.Enable();
    // Prepare the OpenCV Mats and windows
    namedWindow("camera_window");
    Mat camera_img = Mat(Size(video.Width(), video.Height()), CV_8UC4);
    namedWindow("depth_window");
    Mat depth_img = Mat(Size(depth.Width(), depth.Height()), CV_16UC1); // the depth stream appears to be 16-bit
    // Registration of the depth and color images (kinect_utility.h)
    ColorFromDepthEngine CFDengine;
#pragma endregion
#pragma region // --- my init ---
    mine::flag skeleton_flag;
    // Temporary drawing buffers, cleared to the transparent color (black)
    cv::Mat buf_img(depth_img.size(), CV_8UC4, Scalar(0));
    cv::Mat buf_depth_img(depth_img.size(), CV_16UC1, Scalar(0));
    // State for the right-hand effect
    float rballsize = 3.0f;
    Point prhand_prev(0, 0);
    ushort drhand_prev = 0;
    mine::ThrownObject rball;
#pragma endregion
    while ( 1 ) {
        #pragma region // --- get data ---
        // Wait for the data to update
        kinect.WaitAndUpdateAll();
        // Grab the skeleton frame
        kinect::nui::SkeletonFrame skeletonframe = skeleton.GetNextFrame();
        // Fetch the next frame's data (OpenNI style)
        ImageFrame image( video );
        DepthFrame depthMD( depth );
        // Copy the data into the cv::Mats
        camera_img = Mat(camera_img.size(), CV_8UC4, (BYTE *)image.Bits());
        depth_img = Mat(depth_img.size(), CV_16UC1, (BYTE *)depthMD.Bits());
        // Align the color image to the depth image with the utility
        cv::Mat adjusted_camera_img(depth_img.size(), CV_8UC4);
        adjusted_camera_img = CFDengine.getAdjustedImage(camera_img);
        #pragma endregion
        #pragma region // --- processing ---
        SkeletonDrawer skeletondrawer(skeletonframe);
        SkeletonPoints me = skeletondrawer.me(depth_img.cols, depth_img.rows); // get the skeleton in depth-image coordinates
        if(skeleton_flag.IsTrue) {
            me.Drawall(adjusted_camera_img);
        }
        if(me.IsTracked) { // if a person is in view
            Point prhand = me.getPoint(NUI_SKELETON_POSITION_HAND_RIGHT);
            Point plhand = me.getPoint(NUI_SKELETON_POSITION_HAND_LEFT);
            Scalar color_rhand = Scalar(0, 0, 255);
            Scalar color_lhand = Scalar(0, 255, 0);
            #pragma region // --- shoot ---
            if( prhand.inside(Rect(0, 0, depth_img.cols, depth_img.rows)) ) {
                ushort drhand = depth_img.at<ushort>(prhand);
                // While the right hand holds the same position and depth, charge the ball
                if((norm(prhand - prhand_prev) < THRESH_SAMEPOS) && abs(drhand - drhand_prev) < THRESH_SAMEDEPTH) {
                    rballsize += 0.5f;
                    if(rballsize > 10) { // once it has grown large enough, mark it fully charged
                        rballsize = 20;
                    }
                }
                else {
                    if(rballsize == 20) { // first movement after a full charge: launch the ball
                        std::cout << "start" << drhand << "delta" << drhand - drhand_prev << std::endl;
                        rball = mine::ThrownObject(depth_img, prhand, drhand, prhand - prhand_prev, drhand - drhand_prev);
                        //rball = mine::ThrownObject(depth_img, prhand, depth_img.at<ushort>(me.getPoint(NUI_SKELETON_POSITION_SPINE)), prhand-prhand_prev, 0);
                    }
                    rballsize = 3.0f;
                }
                // Remember this frame's values for the next iteration
                prhand_prev = prhand;
                drhand_prev = drhand;
//......... remainder of this example omitted .........
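Example 3 also relies on mine::ThrownObject, which is not included in the excerpt. Judging from the constructor call above, it is a projectile seeded with a start position and depth plus per-frame velocities in image and depth space; the depth map argument is presumably used for collision tests in the omitted code. A hypothetical minimal stand-in (an assumption, not the author's class):
// Hypothetical stand-in for the author's mine::ThrownObject
#include <opencv2/opencv.hpp>
namespace mine {
    class ThrownObject {
    public:
        ThrownObject() : depth_(0), dvel_(0), active_(false) {}
        ThrownObject(const cv::Mat& /*depth map, unused in this sketch*/,
                     cv::Point pos, ushort d, cv::Point vel, int dvel)
            : pos_(pos), vel_(vel), depth_(d), dvel_(dvel), active_(true) {}
        void step() {                                     // advance one frame
            if (!active_) return;
            pos_ += vel_;
            depth_ = static_cast<ushort>(depth_ + dvel_); // move through depth too
        }
        bool active() const { return active_; }
        cv::Point position() const { return pos_; }
    private:
        cv::Point pos_, vel_;
        ushort depth_;
        int dvel_;
        bool active_;
    };
}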
Example 4: main
int main(int argc, char *argv[]){
#pragma region // --- init ---
    // Declare and initialize the Kinect class
    Kinect kinect;
    kinect.Initialize( NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX );
    // Create the streams: one for the color image, one for depth
    ImageStream& video = kinect.VideoStream();
    video.Open( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480 );
    ImageStream& depth = kinect.DepthStream();
    depth.Open( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240 );
    // Prepare to use the skeleton engine
    kinect::nui::SkeletonEngine& skeleton = kinect.Skeleton();
    skeleton.Enable();
    // Prepare the OpenCV Mats and windows
    namedWindow("camera_window");
    Mat camera_img = Mat(Size(video.Width(), video.Height()), CV_8UC4);
    namedWindow("depth_window");
    Mat depth_img = Mat(Size(depth.Width(), depth.Height()), CV_16UC1); // the depth stream appears to be 16-bit
    // Registration of the depth and color images (kinect_utility.h)
    ColorFromDepthEngine CFDengine;
#pragma endregion
    const int DISPWIDTH = 1500;
    const int DISPHEIGHT = 860;
#pragma region // --- window init ---
    cv::namedWindow("full");
    cv::setWindowProperty("full", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    cv::setWindowProperty("full", CV_WND_PROP_AUTOSIZE, CV_WINDOW_AUTOSIZE);
    std::cout << cv::getWindowProperty("full", CV_WND_PROP_AUTOSIZE) << CV_WINDOW_AUTOSIZE << std::endl;
    std::cout << cv::getWindowProperty("full", CV_WND_PROP_FULLSCREEN) << CV_WINDOW_FULLSCREEN << std::endl;
    cvMoveWindow("full", -10, -50);
    cv::Point colorpoint(100, 100);
    mine::flag hogeflag;
#pragma endregion
    while ( 1 ) {
        #pragma region // --- get data ---
        // Wait for the data to update
        kinect.WaitAndUpdateAll();
        // Grab the skeleton frame
        kinect::nui::SkeletonFrame skeletonframe = skeleton.GetNextFrame();
        // Fetch the next frame's data (OpenNI style)
        ImageFrame image( video );
        DepthFrame depthMD( depth );
        // Copy the data into the cv::Mats
        camera_img = Mat(camera_img.size(), CV_8UC4, (BYTE *)image.Bits());
        depth_img = Mat(depth_img.size(), CV_16UC1, (BYTE *)depthMD.Bits());
        // Align the color image to the depth image with the utility
        cv::Mat adjusted_camera_img(depth_img.size(), CV_8UC4);
        adjusted_camera_img = CFDengine.getAdjustedImage(camera_img);
        #pragma endregion
        #pragma region // --- processing ---
        SkeletonDrawer skeletondrawer(skeletonframe);
        SkeletonPoints me = skeletondrawer.me(depth_img.cols, depth_img.rows); // get the skeleton in depth-image coordinates
        /*
        if(me.IsTracked){ // if a person is in view
            //me.Drawall(adjusted_camera_img); // draw the skeleton
            // Left and right hands
            circle(adjusted_camera_img, me.getPoint(NUI_SKELETON_POSITION_HAND_LEFT), 5, cv::Scalar(0,0,255), -1);
            circle(adjusted_camera_img, me.getPoint(NUI_SKELETON_POSITION_HAND_RIGHT), 5, cv::Scalar(0,255,0), -1);
        }
        */
        #pragma endregion
        #pragma region // --- show ---
        cv::resize(adjusted_camera_img, camera_img, camera_img.size());
        // Show a red dot on a black full-screen canvas (for the projector)
        cv::Mat proj = cv::Mat(DISPHEIGHT, DISPWIDTH, CV_8UC4, cv::Scalar(0));
        cv::circle(proj, colorpoint, 30, cv::Scalar(0, 0, 255), -1);
        cv::imshow("full", proj);
        // Split the registered camera image into B, G, R planes and threshold each one
        std::vector<cv::Mat> planes;
        std::vector<cv::Mat> colors;
        for(int i = 0; i < 3; i++){
            colors.push_back(cv::Mat(adjusted_camera_img.size(), CV_8UC1));
        }
        cv::split(adjusted_camera_img, planes);
        //cv::threshold(planes[0], colors[0], 200, 255, THRESH_BINARY);
        cv::threshold(planes[0], colors[0], 200, 255, THRESH_BINARY_INV); // low blue
        //cv::imshow("B", colors[0]);
        cv::threshold(planes[1], colors[1], 200, 255, THRESH_BINARY_INV); // low green
        //cv::threshold(planes[1], colors[1], 10, 255, THRESH_BINARY_INV);
        //cv::imshow("G", colors[1]);
        //cv::threshold(planes[2], colors[2], 200, 255, THRESH_BINARY);
        cv::threshold(planes[2], colors[2], 230, 255, THRESH_BINARY); // high red
//......... remainder of this example omitted .........
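The omitted tail of Example 4 presumably combines the three binary masks to locate the projected red dot in the camera image. One plausible way to finish that step (an assumption, not the author's omitted code) is to AND the masks together and take the centroid of the result:
// Combine "low blue", "low green", and "high red" into one mask, then find its centroid
cv::Mat red_mask;
cv::bitwise_and(colors[0], colors[1], red_mask);  // low blue AND low green
cv::bitwise_and(red_mask, colors[2], red_mask);   // ... AND high red
cv::Moments m = cv::moments(red_mask, true);      // treat the mask as a binary image
if(m.m00 > 0){
    cv::Point centroid((int)(m.m10 / m.m00), (int)(m.m01 / m.m00));
    // e.g. compare centroid against colorpoint to calibrate the projector vs. the camera
}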