This page collects typical C++ usage examples of the ImageMetaData class. If you have been wondering what ImageMetaData does, how it is used, and what real-world code looks like, the curated class examples below should help.
Fifteen ImageMetaData code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ examples.
Example 1: DrawEdges
void OpencvModule::DrawEdges(ImageMetaData& g_imageMD){
    // Wrap the OpenNI buffer in a cv::Mat header (no copy is made)
    Mat rgb(480, 640, CV_8UC3, (uchar*)g_imageMD.WritableData());
    cvtColor(rgb, gray, CV_RGB2GRAY);
    // Detect edges and convert back to a displayable 3-channel image
    Canny(gray, grayedge, fThresCanny1, fThresCanny2);
    cvtColor(grayedge, rgbedge, CV_GRAY2BGR);
    // Overlay the frame timestamp (microseconds -> seconds)
    float aux = ((float)g_imageMD.Timestamp()) / 1E6;
    QVariant time_double(aux);
    putText(rgbedge, "Time:", cvPoint(460,30), 5, 1, cvScalar(255, 255, 255, 0), 1, 1);
    putText(rgbedge, time_double.toString().toStdString(), cvPoint(535,30), 6, 0.6, cvScalar(255, 255, 255, 0), 1, 1);
    imshow("Caremedia Kinect Viewer", rgbedge);
    waitKey(5); // give HighGUI time to render the frame
}
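For context, here is a minimal capture loop that could feed DrawEdges. This is a sketch only: the xn::Context, ImageGenerator and OpencvModule instances, and the runEdgeViewer function itself, are assumptions for illustration rather than part of the original example.
// Sketch: pump OpenNI frames into DrawEdges (assumes an initialized
// xn::Context, an ImageGenerator, and an OpencvModule instance).
#include <XnCppWrapper.h>
using namespace xn;
void runEdgeViewer(Context& context, ImageGenerator& imageGen, OpencvModule& viewer)
{
    ImageMetaData imageMD;
    for (;;)
    {
        // Block until a new image frame arrives
        if (context.WaitOneUpdateAll(imageGen) != XN_STATUS_OK)
            break;
        imageGen.GetMetaData(imageMD);
        viewer.DrawEdges(imageMD);
    }
}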
Example 2: mResource
ImageResource::ImageResource(const ds::Resource& res, const int flags)
    : mResource(res)
    , mFlags(flags) {
    // Record the resource's known dimensions; the temporary ImageMetaData
    // presumably registers them in a shared size cache keyed by file path.
    ImageMetaData md;
    md.add(res.getAbsoluteFilePath(), ci::vec2(res.getWidth(), res.getHeight()));
}
Example 3: AbstractImageStreamProvider
ImageProvider::ImageProvider(Context* pContext) : AbstractImageStreamProvider(pContext)
{
    CALL_XN( pContext->FindExistingNode(XN_NODE_TYPE_IMAGE, m_imageGen) );
    // Verify the stream is 640x480 RGB24 before going any further
    ImageMetaData md;
    m_imageGen.GetMetaData(md);
    CHECK_ERROR(md.PixelFormat() == XN_PIXEL_FORMAT_RGB24, "This camera's data format is not supported.");
    CHECK_ERROR(md.XRes() == 640 && md.YRes() == 480, "This camera's resolution is not supported.");
}
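If the device starts in a different mode, you can usually request 640x480 RGB24 up front with OpenNI's map-output-mode API. A minimal sketch, assuming an already-initialized xn::ImageGenerator named imageGen:
// Sketch: request VGA @ 30 FPS from the image generator before the
// resolution check above (imageGen is an assumption for illustration).
XnMapOutputMode mode;
mode.nXRes = 640;
mode.nYRes = 480;
mode.nFPS  = 30;
XnStatus rc = imageGen.SetMapOutputMode(mode);
if (rc != XN_STATUS_OK)
    printf("SetMapOutputMode failed: %s\n", xnGetStatusString(rc));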
Example 4: GetColorAndDepthImages
// Gets the colour and depth data from the Kinect sensor.
bool GetColorAndDepthImages(ColorImage& colorImage, DepthImage& depthImage)
{
    // Read a new frame (blocking operation)
    XnStatus rc = deviceContext.WaitAnyUpdateAll();
    if (rc != XN_STATUS_OK)
    {
        /*LOGE("Read failed: %s\n", xnGetStatusString(rc));*/
        throw rc;
    }
    // Get handles to the new data
    static ImageMetaData colorImageMetaData;
    static DepthMetaData depthImageMetaData;
    colorImageGenerator.GetMetaData(colorImageMetaData);
    depthImageGenerator.GetMetaData(depthImageMetaData);
    // Validate the generators
    if (!depthImageGenerator.IsValid() || !colorImageGenerator.IsValid())
    {
        /*LOGE("Error: Color or depth image is invalid.");*/
        throw 1;
    }
    // Skip frames we have already processed
    if (colorImageMetaData.Timestamp() <= mostRecentRGB)
        return false;
    // Fetch pointers to the pixel data
    const XnRGB24Pixel* pColorImage = colorImageMetaData.RGB24Data();
    const XnDepthPixel* pDepthImage = depthImageMetaData.Data();
    // Copy the data into the caller's fixed-size arrays
    memcpy(colorImage.data, pColorImage, sizeof(colorImage.data));
    memcpy(depthImage.data, pDepthImage, sizeof(depthImage.data));
    colorImage.rows = colorImage.maxRows;
    colorImage.cols = colorImage.maxCols;
    depthImage.rows = depthImage.maxRows;
    depthImage.cols = depthImage.maxCols;
    mostRecentRGB = colorImageMetaData.Timestamp();
    return true;
}
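Note that sizeof(colorImage.data) only equals the full buffer size when data is a fixed-size array member, not a pointer. The structs below are a hypothetical reconstruction that matches the memcpy/sizeof usage and the field names in the example; the 640x480 resolution and pixel types are assumptions:
// Hypothetical image containers compatible with GetColorAndDepthImages.
struct ColorImage {
    static const int maxRows = 480, maxCols = 640;
    XnRGB24Pixel data[maxRows * maxCols]; // fixed array, so sizeof(data) is the full byte count
    int rows, cols;
};
struct DepthImage {
    static const int maxRows = 480, maxCols = 640;
    XnDepthPixel data[maxRows * maxCols];
    int rows, cols;
};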
Example 5:
void ImageScanThread<DBFS>::PopulateMetadata
    (const QString &path, int type, QString &comment, uint &time, int &orientation)
{
    // Read orientation, date and comment from the file's metadata
    ImageMetaData *metadata = (type == kImageFile)
        ? ImageMetaData::FromPicture(path)
        : ImageMetaData::FromVideo(path);
    orientation = metadata->GetOrientation();
    comment = metadata->GetComment().simplified();
    QDateTime dt = metadata->GetOriginalDateTime();
    time = dt.isValid() ? dt.toTime_t() : 0;
    delete metadata;
}
Example 6: transformImageMD
void transformImageMD(Mat FrameImage, ImageMetaData& imageMD)
{
    // Copy an OpenCV BGR frame into the metadata's writable RGB map,
    // swapping the channel order pixel by pixel.
    RGB24Map& imageMap = imageMD.WritableRGB24Map();
    for (XnUInt32 y = 0; y < imageMD.YRes(); y++)
    {
        for (XnUInt32 x = 0; x < imageMD.XRes(); x++)
        {
            XnRGB24Pixel imagePixel;
            imagePixel.nBlue  = FrameImage.at<Vec3b>(y, x)[0];
            imagePixel.nGreen = FrameImage.at<Vec3b>(y, x)[1];
            imagePixel.nRed   = FrameImage.at<Vec3b>(y, x)[2];
            imageMap(x, y) = imagePixel;
        }
    }
}
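If the Mat is continuous and its size matches the metadata's resolution, the per-pixel loop can be collapsed to one cvtColor plus one memcpy. A sketch under those assumptions (the function name transformImageMDFast is invented here):
// Sketch: bulk copy instead of per-pixel assignment (assumes a continuous
// 8UC3 BGR Mat whose dimensions match the metadata's resolution).
void transformImageMDFast(const Mat& frameBGR, ImageMetaData& imageMD)
{
    Mat rgb;
    cvtColor(frameBGR, rgb, CV_BGR2RGB); // BGR -> RGB channel swap
    memcpy(imageMD.WritableData(), rgb.data,
           imageMD.XRes() * imageMD.YRes() * sizeof(XnRGB24Pixel));
}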
Example 7: DrawRGB
void OpencvModule::DrawRGB(ImageMetaData& g_imageMD){
    // Wrap the OpenNI RGB buffer in a cv::Mat header (no copy is made)
    Mat RGB(480, 640, CV_8UC3, (uchar*)g_imageMD.WritableData());
    cvtColor(RGB, image_BGR, CV_RGB2BGR);
    // Overlay the frame timestamp (microseconds -> seconds)
    float aux = ((float)g_imageMD.Timestamp()) / 1E6;
    QVariant time_double(aux);
    putText(image_BGR, "Time:", cvPoint(460,30), 5, 1, cvScalar(255, 255, 255, 0), 1, 1);
    putText(image_BGR, time_double.toString().toStdString(), cvPoint(535,30), 6, 0.6, cvScalar(255, 255, 255, 0), 1, 1);
    imshow("Caremedia Kinect Viewer", image_BGR);
    waitKey(5); // give HighGUI time to render the frame
}
Example 8: captureRGB
void captureRGB(unsigned char* g_ucImageBuffer)
{
    // Grab the current frame's metadata for its resolution
    ImageMetaData imd;
    _image.GetMetaData(imd);
    XnUInt16 g_nXRes = imd.XRes();
    XnUInt16 g_nYRes = imd.YRes();
    // Repack the RGB24 map into a BGRA byte buffer
    const XnRGB24Pixel* pImageMap = _image.GetRGB24ImageMap();
    for (unsigned int nY = 0; nY < g_nYRes; nY++)
    {
        for (unsigned int nX = 0; nX < g_nXRes; nX++)
        {
            g_ucImageBuffer[(nY*g_nXRes+nX)*4+0] = pImageMap[nY*g_nXRes+nX].nBlue;
            g_ucImageBuffer[(nY*g_nXRes+nX)*4+1] = pImageMap[nY*g_nXRes+nX].nGreen;
            g_ucImageBuffer[(nY*g_nXRes+nX)*4+2] = pImageMap[nY*g_nXRes+nX].nRed;
            g_ucImageBuffer[(nY*g_nXRes+nX)*4+3] = 0x00; // unused alpha byte
        }
    }
}
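The caller must supply a buffer of at least XRes * YRes * 4 bytes, which this function does not verify. A usage sketch assuming the stream is 640x480:
// Sketch: allocate a BGRA buffer sized for a 640x480 stream and fill it.
#include <vector>
std::vector<unsigned char> buffer(640 * 480 * 4);
captureRGB(buffer.data());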
Example 9: drawBackground
void WorldRenderer::drawBackground()
{
    m_rctx->orthoMatrix.PushMatrix();
    {
        //TODO: find out what this does
        //m_rctx->orthoMatrix.Translate(
        //    float(m_rng.gaussian(0.6)) * currentIntensity * 0.01f,
        //    float(m_rng.gaussian(0.6)) * currentIntensity * 0.01f,
        //    0);
        // set up the shader
        m_rctx->shaderMan->UseStockShader(GLT_SHADER_SHADED, m_rctx->orthoMatrix.GetMatrix());
        // get the depth buffer
        DepthMetaData dmd;
        m_depthGen->GetMetaData(dmd);
        const XnDepthPixel* dp = dmd.Data();
        // get the image buffer
        ImageMetaData imd;
        m_imageGen->GetMetaData(imd);
        const XnRGB24Pixel* ip = imd.RGB24Data();
        // get the working buffers
        M3DVector3f* vp = m_vertexBuf;
        M3DVector4f* cp = m_colorBuf;
        XnUInt32 numPoints = getNumPoints();
        // set up henshin-related information
        const float Z_SCALE = 10.0f;
        XnUserID userID = 0;
        const XnLabel* lp = NULL;
        XV3 headCenter, headDirection;
        getHenshinData(&userID, &lp, &headCenter, &headDirection);
        float lightRadius = 900.0f;
        bool isTracked = userID && lp;
        const int NUM_BALLS = 3;
        XV3 ball_centers[NUM_BALLS];
        bool ball_enabled_flags[NUM_BALLS];
        float ball_radius[NUM_BALLS];
        float ball_core_radius[NUM_BALLS];
        float ball_core_radius2[NUM_BALLS];
        // Get the ball centres and transform them into projective coords.
        // Also derive a radius so each ball scales as it moves away from the camera.
        for (int j = 0; j < NUM_BALLS; j++) {
            m_ball_manager->GetBallInfo(j, &ball_enabled_flags[j], &ball_centers[j]);
            if (!ball_enabled_flags[j]) continue;
            XV3 ball_top(ball_centers[j]); // copy the ball centre before transformation
            m_depthGen->ConvertRealWorldToProjective(1, &ball_centers[j], &ball_centers[j]);
            normalizeProjective(&ball_centers[j]);
            // This is probably a clunky way to transform the radius into
            // projective coords, but it seems to work well enough.
            ball_top.Y += lightRadius;
            m_depthGen->ConvertRealWorldToProjective(1, &ball_top, &ball_top);
            normalizeProjective(&ball_top);
            ball_radius[j] = fabs(ball_top.Y - ball_centers[j].Y);
            ball_core_radius[j] = ball_radius[j] * 0.1f;
            ball_core_radius2[j] = square(ball_core_radius[j]);
        }
        XnUInt32 ix = 0, iy = 0;
        float nearZ = PERSPECTIVE_Z_MIN + m_depthAdjustment;
        for (XnUInt32 i = 0; i < numPoints; i++, dp++, ip++, vp++, cp++, lp++, ix++) {
            if (ix == m_width) {
                ix = 0;
                iy++;
            }
            // (*vp)[0] (x) and (*vp)[1] (y) are already set
            (*vp)[2] = (*dp) ? getNormalizedDepth(*dp, nearZ, PERSPECTIVE_Z_MAX) : Z_INFINITE;
            setRGB(cp, *ip);
            // highlight the tracked user
            if (isTracked && *lp == userID) {
                (*cp)[0] *= 1.2f;
                (*cp)[1] *= 1.2f;
                (*cp)[2] *= 1.2f;
            }
            // draw the balls
            for (int j = 0; j < NUM_BALLS; j++) {
                if (!ball_enabled_flags[j]) continue;
                XV3& lightCenter = ball_centers[j];
                //float ball_depth = (*dp) ? getNormalizedDepth(ball_radius[j], nearZ, PERSPECTIVE_Z_MAX) : 0;
                if ((*vp)[2] < (lightCenter.Z - 0.001 * ball_radius[j])) continue; // skip obscured pixels
//......... part of the code omitted here .........
Example 10: convertImageRGB
// -----------------------------------------------------------------------------------------------------
// convertImageRGB
// -----------------------------------------------------------------------------------------------------
void convertImageRGB(const XnRGB24Pixel* pImageMap, IplImage* pImgRGB)
{
    // Convert the OpenNI buffer to a 24-bit, 3-channel IplImage (BGR order)
    for (unsigned int i = 0; i < g_imageMD.XRes() * g_imageMD.YRes(); i++)
    {
        pImgRGB->imageData[3*i+0] = pImageMap[i].nBlue;
        pImgRGB->imageData[3*i+1] = pImageMap[i].nGreen;
        pImgRGB->imageData[3*i+2] = pImageMap[i].nRed;
    }
}
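A usage sketch for the converter: allocate the destination IplImage from the metadata's resolution with the OpenCV 1.x C API, convert, then release it (g_imageMD is the global from the example):
// Sketch: allocate a matching 8-bit, 3-channel image and convert into it.
IplImage* pImgRGB = cvCreateImage(cvSize(g_imageMD.XRes(), g_imageMD.YRes()),
                                  IPL_DEPTH_8U, 3);
convertImageRGB(g_imageMD.RGB24Data(), pImgRGB);
// ... use the image ...
cvReleaseImage(&pImgRGB);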
Example 11: prepare
XnStatus prepare(char useScene, char useDepth, char useImage, char useIr, char useHistogram)
{
    //TODO handle possible failures! Gotcha!
    if (useDepth)
    {
        mDepthGen.GetMetaData(depthMD);
        nXRes = depthMD.XRes();
        nYRes = depthMD.YRes();
        pDepth = depthMD.Data();
        if (useHistogram)
        {
            calcHist();
            // rewind the pointer
            pDepth = depthMD.Data();
        }
    }
    if (useScene)
    {
        mUserGen.GetUserPixels(0, sceneMD);
        nXRes = sceneMD.XRes();
        nYRes = sceneMD.YRes();
        pLabels = sceneMD.Data();
    }
    if (useImage)
    {
        mImageGen.GetMetaData(imageMD);
        nXRes = imageMD.XRes();
        nYRes = imageMD.YRes();
        pRGB = imageMD.RGB24Data();
        // HISTOGRAM?????
    }
    if (useIr)
    {
        mIrGen.GetMetaData(irMD);
        nXRes = irMD.XRes();
        nYRes = irMD.YRes();
        pIR = irMD.Data();
        // HISTOGRAM????
    }
    return XN_STATUS_OK; // the function is declared XnStatus but originally returned nothing; report success explicitly
}
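calcHist() is not shown in this example; the stock OpenNI samples conventionally build a cumulative depth histogram so that nearer pixels render brighter. The sketch below reconstructs that convention and is an assumption, not the example's actual implementation (the 10000-bin size mirrors the usual MAX_DEPTH):
// Sketch: cumulative depth histogram in the style of the OpenNI samples.
#include <cstring>
float g_pDepthHist[10000]; // one bin per millimetre of depth
void calcHist()
{
    memset(g_pDepthHist, 0, sizeof(g_pDepthHist));
    unsigned int nPoints = 0;
    const XnDepthPixel* p = depthMD.Data();
    for (XnUInt32 i = 0; i < depthMD.XRes() * depthMD.YRes(); i++, p++)
        if (*p) { g_pDepthHist[*p]++; nPoints++; }
    for (int i = 1; i < 10000; i++) // accumulate
        g_pDepthHist[i] += g_pDepthHist[i-1];
    if (nPoints) // normalise to 0..255, brighter when closer
        for (int i = 1; i < 10000; i++)
            g_pDepthHist[i] = (unsigned int)(256 * (1.0f - g_pDepthHist[i] / nPoints));
}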
Example 12: takePhoto
void takePhoto() {
    static int index = 1;
    char fname[256] = {0,};
    sprintf(fname, "kinect%03d.txt", index++);
    g_depth.GetMetaData(g_depthMD);
    g_image.GetMetaData(g_imageMD);
    int const nx = g_depthMD.XRes();
    int const ny = g_depthMD.YRes();
    assert(nx == g_imageMD.XRes());
    assert(ny == g_imageMD.YRes());
    const XnDepthPixel* pDepth = g_depthMD.Data();
    const XnUInt8* pImage = g_imageMD.Data();
    FILE* file = fopen(fname, "wb"); // binary mode keeps '\n' untranslated on Windows
    fprintf(file, "%d\n%d\n\n", nx, ny);
    // One line per pixel: R G B depth
    for (int y = 0, di = 0, ri = 0, gi = 1, bi = 2; y < ny; y++) {
        for (int x = 0; x < nx; x++, di++, ri += 3, gi += 3, bi += 3) {
            int const r = pImage[ri];
            int const g = pImage[gi];
            int const b = pImage[bi];
            int const d = pDepth[di];
            assert(r >= 0);
            assert(g >= 0);
            assert(b >= 0);
            assert(d >= 0);
            assert(r <= 0xFF);
            assert(g <= 0xFF);
            assert(b <= 0xFF);
            assert(d <= 0xFFFF);
            fprintf(file, "%3d %3d %3d %5d\n", r, g, b, d);
        }
        fprintf(file, "\n");
    }
    fflush(file);
    fclose(file);
}
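The file format pairs each depth sample with the colour pixel at the same index, which is only physically meaningful if the two streams are registered. A sketch of the usual OpenNI alignment call, run once during setup and assuming both generators are valid:
// Sketch: align the depth map to the RGB camera's viewpoint so that
// depth[i] and image pixel i correspond to the same scene point.
if (g_depth.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
    g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);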
Example 13: main
int main(int argc, char* argv[])
{
    XnStatus rc;
    EnumerationErrors errors;
    rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
    if (rc == XN_STATUS_NO_NODE_PRESENT)
    {
        XnChar strError[1024];
        errors.ToString(strError, 1024);
        printf("%s\n", strError);
        return (rc);
    }
    else if (rc != XN_STATUS_OK)
    {
        printf("Open failed: %s\n", xnGetStatusString(rc));
        return (rc);
    }
    // Find the depth and image nodes created from the XML file.
    // (The original sample left these return codes unchecked.)
    rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
    if (rc != XN_STATUS_OK)
    {
        printf("No depth node: %s\n", xnGetStatusString(rc));
        return (rc);
    }
    rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
    if (rc != XN_STATUS_OK)
    {
        printf("No image node: %s\n", xnGetStatusString(rc));
        return (rc);
    }
    g_depth.GetMetaData(g_depthMD);
    g_image.GetMetaData(g_imageMD);
    // Hybrid mode isn't supported by this sample
    if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes())
    {
        printf("The device depth and image resolution must be equal!\n");
        return 1;
    }
    // RGB24 is the only image format supported
    if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
    {
        printf("The device image format must be RGB24\n");
        return 1;
    }
    // Texture map init: round each dimension up to the next multiple of 512
    g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512;
    g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512;
    g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));
    // OpenGL init
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowSize(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
    glutCreateWindow("OpenNI Simple Viewer");
    glutFullScreen();
    glutSetCursor(GLUT_CURSOR_NONE);
    glutKeyboardFunc(glutKeyboard);
    glutDisplayFunc(glutDisplay);
    glutIdleFunc(glutIdle);
    glDisable(GL_DEPTH_TEST);
    glEnable(GL_TEXTURE_2D);
    // Per-frame code is in glutDisplay
    glutMainLoop();
    return 0;
}
Example 14: drawImage
//----------------------------------------------------
// Image drawing
//----------------------------------------------------
void drawImage(void){
    switch(g_nViewState){
    case DISPLAY_MODE_OVERLAY: // normal drawing modes
    case DISPLAY_MODE_DEPTH:
    case DISPLAY_MODE_IMAGE:
        glMatrixMode(GL_PROJECTION); // select the projection matrix
        glLoadIdentity();            // reset it to the identity
        gluOrtho2D(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0); // orthographic projection of world coordinates onto normalized device coordinates (left, right, bottom, top)
        // * An orthographic projection also lets the point cloud be projected onto a plane, which is ideal for chroma keying
        // * The Kinect's usable depth range is roughly 500 to 9000 (configured as 10000)
        glMatrixMode(GL_MODELVIEW);  // select the modelview matrix
        glLoadIdentity();
        glEnable(GL_TEXTURE_2D);     // enable texture mapping
        // set the texture parameters and upload the texture
        glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, g_nTexMapX, g_nTexMapY, 0, GL_RGB, GL_UNSIGNED_BYTE, g_pTexMap); // upload the image data
        // Display the OpenGL texture map
        glColor4f(1,1,1,1);
        // draw the image as a textured quad
        glBegin(GL_QUADS);
        {
            int nXRes = g_depthMD.FullXRes();
            int nYRes = g_depthMD.FullYRes();
            // top-left
            glTexCoord2f(0, 0);
            glVertex2f(0, 0);
            // top-right
            glTexCoord2f((float)nXRes/(float)g_nTexMapX, 0);
            glVertex2f(GL_WIN_SIZE_X, 0);
            // bottom-right
            glTexCoord2f((float)nXRes/(float)g_nTexMapX, (float)nYRes/(float)g_nTexMapY);
            glVertex2f(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
            // bottom-left
            glTexCoord2f(0, (float)nYRes/(float)g_nTexMapY);
            glVertex2f(0, GL_WIN_SIZE_Y);
        }
        glEnd();
        glDisable(GL_TEXTURE_2D); // disable texture mapping
        break;
    case DISPLAY_MODE_CHROMA: // point-cloud drawing modes
    case DISPLAY_MODE_POINT_CLOUD:
        // projection transform
        glMatrixMode(GL_PROJECTION); // select the projection matrix
        glLoadIdentity();            // reset it to the identity
        glOrtho(0, KINECT_IMAGE_WIDTH,
                KINECT_IMAGE_HEIGHT, 0,
                -1.0, -KINECT_MAX_DEPTH - KINECT_VISIBLE_DELTA); // orthographic projection (left, right, bottom, top, near, far)
        // * An orthographic projection also lets the point cloud be projected onto a plane, which is ideal for chroma keying
        // * The Kinect's usable depth range is roughly 500 to 9000 (configured as 10000)
        // viewing transform
        gluLookAt(
            g_lokEyeX, g_lokEyeY, g_lokEyeZ, // eye position (initial: (0,0,-1))
            g_lokDirX, g_lokDirY, g_lokDirZ, // look-at position (initial: (0,0,-2))
            0.0, 1.0, 0.0);                  // up vector
        // modeling transform
        glMatrixMode(GL_MODELVIEW); // select the modelview matrix
        glLoadIdentity();           // reset it to the identity
        glEnable(GL_DEPTH_TEST);    // enable hidden-surface removal
        // display the point cloud
        glPointSize(g_pointSize);   // point size
        drawPointCloud(g_pBackTex, g_pBackDepth, g_pPoint); // draw the background image
        //drawPointCloud(g_imageMD.RGB24Data(), g_depthMD.Data(), 10, g_chromaThresh); // extract people (depth threshold)
        drawPointCloudHuman(g_imageMD.RGB24Data(), g_depthMD.Data(), g_sceneMD.Data(), g_pPoint); // extract people (detect moving objects)
        glDisable(GL_DEPTH_TEST); // disable hidden-surface removal
        break;
    }
}
Example 15: setTexture
//----------------------------------------------------
// Texture setup
//----------------------------------------------------
void setTexture(void){
    xnOSMemSet(g_pTexMap, 0, g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel)); // zero out all of g_pTexMap
    // drawing mode 1 or 3
    if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_IMAGE){
        const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data(); // pointer into g_imageMD (the image data)
        XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX;
        for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){
            const XnRGB24Pixel* pImage = pImageRow;
            XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset();
            for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pImage, ++ pTex){
                *pTex = *pImage;
            }
            pImageRow += g_imageMD.XRes();
            pTexRow += g_nTexMapX;
        }
    }
    // drawing mode 1 or 2
    if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_DEPTH){
        const XnDepthPixel* pDepthRow = g_depthMD.Data();
        XnRGB24Pixel* pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX;
        const XnLabel* pLabel = g_sceneMD.Data();
        for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){
            const XnDepthPixel* pDepth = pDepthRow;
            XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset();
            for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pDepth, ++ pTex, ++ pLabel){
                int nHistValue = g_pDepthHist[*pDepth];
                if(*pLabel){ // this pixel belongs to a person
                    *pTex = userColor[*pLabel];
                }else if (*pDepth != 0){
                    // colour-code the depth in one-metre bands
                    if(*pDepth < 1000){
                        *pTex = xnRGB24Pixel(nHistValue, 0, 0); // red
                    }else if(*pDepth < 2000){
                        *pTex = xnRGB24Pixel(0, nHistValue, 0); // green
                    }else if(*pDepth < 3000){
                        *pTex = xnRGB24Pixel(0, 0, nHistValue); // blue
                    }else if(*pDepth < 4000){
                        *pTex = xnRGB24Pixel(nHistValue, nHistValue, 0); // yellow
                    }else if(*pDepth < 5000){
                        *pTex = xnRGB24Pixel(0, nHistValue, nHistValue); // cyan
                    }else{
                        *pTex = xnRGB24Pixel(nHistValue, 0, nHistValue); // magenta
                    }
                }
            }
            pDepthRow += g_depthMD.XRes();
            pTexRow += g_nTexMapX;
        }
    }
    // drawing mode 4
    //if (g_nViewState == DISPLAY_MODE_CHROMA){
    //    // paste in the image data (camera feed)
    //    const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data(); // pointer into g_imageMD (the image data)
    //    XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX;
    //    for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){ // 480
    //        const XnRGB24Pixel* pImage = pImageRow;
    //        XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset();
    //        for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pImage, ++ pTex){ // 640
    //            *pTex = *pImage;
    //        }
    //        pImageRow += g_imageMD.XRes();
    //        pTexRow += g_nTexMapX;
    //    }
    //    // person extraction using depth data + background compositing
    //    const XnDepthPixel* pDepthRow = g_depthMD.Data(); // pointer to the depth data
    //    pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX;
    //    GLuint g_backWidth = g_back.GetWidth(); // width of the background image
    //    GLubyte* pBackData = g_back.GetData() + g_back.GetImageSize() - 3 * g_backWidth; // pointer into the background (walking from the end)
    //    for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){ // 480
    //        const XnDepthPixel* pDepth = pDepthRow; // pointer to the depth data
    //        XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset();
    //        for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pDepth, ++ pTex){ // 640
    //            // if the depth is 0 or above the threshold, draw the background pixel (below the threshold, keep the camera pixel)
    //            if (*pDepth == 0 || *pDepth >= g_chromaThresh){
    //                pTex->nRed = *pBackData;
    //                pTex->nGreen = *(pBackData + 1);
    //                pTex->nBlue = *(pBackData + 2);
    //            }
    //            pBackData += 3;
    //        }
    //......... part of the code omitted here .........