本文整理汇总了C++中xn::ImageGenerator::GetRGB24ImageMap方法的典型用法代码示例。如果您正苦于以下问题:C++ ImageGenerator::GetRGB24ImageMap方法的具体用法?C++ ImageGenerator::GetRGB24ImageMap怎么用?C++ ImageGenerator::GetRGB24ImageMap使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类xn::ImageGenerator
的用法示例。
在下文中一共展示了ImageGenerator::GetRGB24ImageMap方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: Loop
void Loop(void)
{
XnStatus nRetVal = XN_STATUS_OK;
while (g_notDone)
{
if ((nRetVal = g_context.WaitOneUpdateAll(g_depth)) != XN_STATUS_OK)
//if ((nRetVal = g_context.WaitAndUpdateAll()) != XN_STATUS_OK)
{
fprintf(stderr,"Could not update data: %s\n", xnGetStatusString(nRetVal));
continue;
}
if (g_haveDepth)
{
const XnDepthPixel* pDepthMap = g_depth.GetDepthMap();
ProcessDepthFrame(pDepthMap, g_depthWidth, g_depthHeight);
FindFingertip();
}
if (g_haveImage)
{
const XnRGB24Pixel* pImageMap = g_image.GetRGB24ImageMap();
ProcessImageFrame(pImageMap, g_depthWidth, g_depthHeight);
}
ShowFrame();
CheckKeys();
}
}
示例2: glut_display
// GLUT display callback: pulls the latest depth + RGB frames from OpenNI,
// normalizes depth into an 8-bit buffer, and renders a rotating textured
// point cloud. (The example is truncated by the source site.)
void glut_display() {
xn::DepthMetaData pDepthMapMD;
xn::ImageMetaData pImageMapMD;
#ifdef DEBUGOUT
ofstream datei;
#endif
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Set up a perspective projection for the 3D point cloud.
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
gluPerspective(45, WINDOW_SIZE_X/WINDOW_SIZE_Y, 1000, 5000);
// glOrtho(0, WINDOW_SIZE_X, WINDOW_SIZE_Y, 0, -128, 128);
// Texture matrix: apply user-controlled scale/translation so the RGB
// texture can be aligned with the depth data.
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
// glTranslatef(-12.8/640.0, 9.0/480.0, 0);
// glTranslatef(-12.8/630.0, 9.0/480.0,0);
glScalef(scalex, scaley, 1.0);
glTranslatef(transx/630, transy/480, 0.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Advance the scene rotation a little each frame.
rot_angle+=0.7;
// Wait for new data from the depth generator.
nRetVal = context.WaitAndUpdateAll();
checkError("Fehler beim Aktualisieren der Daten", nRetVal);
// Read the current depth metadata.
depth.GetMetaData(pDepthMapMD);
// Read the current depth map.
const XnDepthPixel* pDepthMap = depth.GetDepthMap();
// Lazily determine the maximum depth once, from the first frame,
// and reuse it for normalization in all later frames.
if(maxdepth==-1)
maxdepth = getMaxDepth(pDepthMap);
// Read the current image metadata.
image.GetMetaData(pImageMapMD);
// Read the current RGB image.
const XnRGB24Pixel* pImageMap = image.GetRGB24ImageMap();
glColor3f(1, 1, 1);
// XnDepthPixel maxdepth = depth.GetDeviceMaxDepth();
const unsigned int xres = pDepthMapMD.XRes();
const unsigned int yres = pDepthMapMD.YRes();
#ifdef DEBUGOUT
datei.open("daniel.txt", ios::out);
#endif
// Normalize 16-bit depth values into 8-bit grayscale (0..255).
// NOTE(review): the row loop runs to yres-1, so the last row is
// skipped — confirm whether this is intentional.
for(unsigned int y=0; y<yres-1; y++) {
for(unsigned int x=0; x<xres; x++) {
aDepthMap[x+y*xres] = static_cast<GLubyte>(static_cast<float>(pDepthMap[x+y*xres])/static_cast<float>(maxdepth)*255);
}
}
/*
glEnable(GL_TEXTURE_2D);
glPushMatrix();
glLoadIdentity();
glTranslatef(-800, 0, -2000);
glBindTexture(GL_TEXTURE_2D, texture_rgb);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, pImageMap);
glBegin(GL_QUADS);
glTexCoord2f(0,1); glVertex3f(0,0,0);
glTexCoord2f(1,1); glVertex3f(640,0,0);
glTexCoord2f(1,0); glVertex3f(640,480,0);
glTexCoord2f(0,0); glVertex3f(0,480,0);
glEnd();
glPopMatrix();
glPushMatrix();
glLoadIdentity();
glTranslatef(-800, 600, -2000);
glBindTexture(GL_TEXTURE_2D, texture_depth);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE8, 640, 480, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, aDepthMap);
glBegin(GL_QUADS);
glTexCoord2f(0,1); glVertex3f(0,0,0);
glTexCoord2f(1,1); glVertex3f(640,0,0);
glTexCoord2f(1,0); glVertex3f(640,480,0);
glTexCoord2f(0,0); glVertex3f(0,480,0);
glEnd();
glPopMatrix();*/
// Position and orient the point cloud under mouse control (cx/cy),
// then upload the RGB frame as the texture for the points.
glPushMatrix();
glLoadIdentity();
glTranslatef(-100, -100, -2000);
glRotatef(cx,0,1,0);
glRotatef(cy,1,0,0);
glTranslatef(-320, -240, 1000);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture_rgb);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, pImageMap);
glBegin(GL_POINTS);
// Emit one textured point per valid depth sample.
// NOTE(review): x runs only to 630 (not xres) — presumably to crop a
// misaligned right edge; confirm against the sensor calibration.
for(unsigned int y=0; y<yres-1; y++) {
for(unsigned int x=0; x<630; x++) {
if(pDepthMap[x+y*xres]!=0) {
//......... remainder of the example omitted by the source site .........
示例3: start_kinect
// Initializes the OpenNI context from an XML config, wires up depth,
// image and user/skeleton generators with calibration callbacks, then
// loops grabbing per-frame depth/RGB maps and tracked-user data.
// (The example is truncated by the source site.)
void start_kinect() {
XnStatus nRetVal = XN_STATUS_OK;
xn::EnumerationErrors errors;
UsersCount = 0;
// Locate the configuration XML, preferring the installed path over
// the local fallback.
const char *fn = NULL;
if (fileExists(SAMPLE_XML_PATH)) fn = SAMPLE_XML_PATH;
else if (fileExists(SAMPLE_XML_PATH_LOCAL)) fn = SAMPLE_XML_PATH_LOCAL;
else {
printf("Could not find '%s' nor '%s'. Aborting.\n" , SAMPLE_XML_PATH, SAMPLE_XML_PATH_LOCAL);
// NOTE(review): the return is commented out, so execution continues
// with fn == NULL and InitFromXmlFile will be handed a null path.
//return XN_STATUS_ERROR;
}
printf("Reading config from: '%s'\n", fn);
nRetVal = g_Context.InitFromXmlFile(fn, g_scriptNode, &errors);
if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
{
XnChar strError[1024];
errors.ToString(strError, 1024);
printf("%s\n", strError);
// NOTE(review): error return commented out — failures only print.
//return (nRetVal);
}
else if (nRetVal != XN_STATUS_OK)
{
printf("Open failed: %s\n", xnGetStatusString(nRetVal));
//return (nRetVal);
}
// Fetch the production nodes declared in the XML; create a user
// generator on the fly if the XML did not declare one.
nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
CHECK_RC(nRetVal,"No depth");
nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
CHECK_RC(nRetVal,"No image");
nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
if (nRetVal != XN_STATUS_OK)
{
nRetVal = g_UserGenerator.Create(g_Context);
CHECK_RC(nRetVal, "Find user generator");
}
// Register user-lifecycle and skeleton-calibration callbacks.
XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected;
if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
{
printf("Supplied user generator doesn't support skeleton\n");
//return 1;
}
nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
CHECK_RC(nRetVal, "Register to user callbacks");
nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
CHECK_RC(nRetVal, "Register to calibration start");
nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
CHECK_RC(nRetVal, "Register to calibration complete");
// Some devices require the user to strike a pose before skeleton
// calibration; register pose detection only in that case.
if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
{
g_bNeedPose = TRUE;
if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
{
printf("Pose required, but not supported\n");
//return 1;
}
nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
CHECK_RC(nRetVal, "Register to Pose Detected");
g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
}
// Track the full skeleton and start all generators.
g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
nRetVal = g_Context.StartGeneratingAll();
CHECK_RC(nRetVal, "StartGenerating");
XnUserID aUsers[MAX_NUM_USERS];
XnUInt16 nUsers;
XnSkeletonJointTransformation anyjoint;
printf("Starting to run\n");
if(g_bNeedPose)
{
printf("Assume calibration pose\n");
}
XnUInt32 epochTime = 0;
// Frame loop: runs until a key is hit.
while (!xnOSWasKeyboardHit())
{
g_Context.WaitOneUpdateAll(g_UserGenerator);
// print the torso information for the first user already tracking
nUsers=MAX_NUM_USERS;
g_UserGenerator.GetUsers(aUsers, nUsers);
int numTracked=0;
int userToPrint=-1;
// Publish the fresh frame data to shared globals under a write lock
// (presumably consumed by another thread — confirm against callers).
WriteLock w_lock(myLock);
pDepthMap = g_depth.GetDepthMap();
pPixelMap = g_image.GetRGB24ImageMap();
g_depth.GetMetaData(g_depthMD);
g_image.GetMetaData(g_imageMD);
pPixelPoint = g_imageMD.RGB24Data();
//......... remainder of the example omitted by the source site .........
示例4: main
// Touch-detection demo: builds a depth background model from OpenNI,
// detects finger touches against it, publishes them via TUIO, and
// plays piano notes through JACK. (Truncated by the source site.)
int main() {
// Background/touch tuning constants (depths in millimeters).
const unsigned int nBackgroundTrain = 30;
const unsigned short touchDepthMin = 10;
const unsigned short touchDepthMax = 20;
const unsigned int touchMinArea = 50;
const bool localClientMode = true; // connect to a local client
const double debugFrameMaxDepth = 4000; // maximal distance (in millimeters) for 8 bit debug depth frame quantization
const char* windowName = "Debug";
const char* colorWindowName = "image";
const Scalar debugColor0(0, 0, 128);
const Scalar debugColor1(255, 0, 0);
const Scalar debugColor2(255, 255, 255);
const Scalar debugColor3(0, 255, 255);
const Scalar debugColor4(255, 0, 255);
// Region-of-interest bounds, adjustable via trackbars below.
int xMin = 50;
int xMax = 550;
int yMin = 50;
int yMax = 300;
Mat1s depth(480, 640); // 16 bit depth (in millimeters)
Mat1b depth8(480, 640); // 8 bit depth
Mat3b rgb(480, 640); // 8 bit depth
Mat3b debug(480, 640); // debug visualization
// NOTE(review): the next four Mats are constructed as (640, 480) —
// rows/cols swapped relative to depth/rgb above; confirm intent.
Mat1s foreground(640, 480);
Mat1b foreground8(640, 480);
Mat1b touch(640, 480); // touch mask
Mat1s background(480, 640);
vector<Mat1s> buffer(nBackgroundTrain);
IplImage * image = cvCreateImage(cvSize(640, 480), 8, 3);
IplImage * convertedImage = cvCreateImage(cvSize(640, 480), 8, 3);
initOpenNI("niConfig.xml");
// TUIO server object
TuioServer* tuio;
if (localClientMode) {
tuio = new TuioServer();
} else {
tuio = new TuioServer("192.168.0.2", 3333, false);
}
TuioTime time;
namedWindow(colorWindowName);
createTrackbar("xMin", colorWindowName, &xMin, 640);
createTrackbar("xMax", colorWindowName, &xMax, 640);
createTrackbar("yMin", colorWindowName, &yMin, 480);
createTrackbar("yMax", colorWindowName, &yMax, 480);
// create some sliders
namedWindow(windowName);
createTrackbar("xMin", windowName, &xMin, 640);
createTrackbar("xMax", windowName, &xMax, 640);
createTrackbar("yMin", windowName, &yMin, 480);
createTrackbar("yMax", windowName, &yMax, 480);
// Audio setup: launch qjackctl, connect the JACK client, and patch
// the piano output to the system playback port.
Keyboard * piano = new Keyboard();
(*piano).initKeyMap();
system("qjackctl &");
sleep(4);
JackByTheNotes * notesJack = new JackByTheNotes();
notesJack->connect();
sleep(2);
system("sudo jack_connect Piano:Rubinstein system:playback_1 &");
map<double, timeval> keys;
// create background model (average depth)
for (unsigned int i = 0; i < nBackgroundTrain; i++) {
xnContext.WaitAndUpdateAll();
depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
buffer[i] = depth;
}
average(buffer, background);
// Main loop: grab frames until ESC (key code 27) is pressed.
while (waitKey(1) != 27) {
// read available data
xnContext.WaitAndUpdateAll();
// update 16 bit depth matrix
depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
//xnImgeGenertor.GetGrayscale8ImageMap()
// Wrap the OpenNI RGB buffer in an IplImage (no copy) and swap
// R/B channels into BGR order for OpenCV.
XnRGB24Pixel* xnRgb =
const_cast<XnRGB24Pixel*>(xnImgeGenertor.GetRGB24ImageMap());
// IplImage * image = cvCreateImage(cvSize(640, 480), 8, 3);
// IplImage * convertedImage = cvCreateImage(cvSize(640, 480), 8, 3);
cvSetData (image, xnRgb, 640 * 3);
cvConvertImage(image, convertedImage, CV_CVTIMG_SWAP_RB);
bool color = true;
rgb = convertedImage;
// cvtColor(rgb,rgb,CV_RGB2BGR);
//......... remainder of the example omitted by the source site .........