This article collects typical usage examples of the C++ ImageGenerator class. If you are unsure what ImageGenerator does in C++, or how to use it, the curated class examples below may help.
Fifteen ImageGenerator code examples are shown, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better C++ code samples.
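Most of the examples below come from OpenNI 1.x projects, where xn::ImageGenerator produces RGB frames from a depth camera such as the Kinect (Examples 1 and 2 use unrelated application classes that happen to share the name). For orientation, here is a minimal setup sketch; it assumes an OpenNI 1.x installation and a connected sensor, and it omits error handling.

// Minimal OpenNI 1.x sketch: create an ImageGenerator and grab one frame.
// Error handling omitted for brevity.
#include <XnCppWrapper.h>
#include <cstdio>

int main()
{
    xn::Context context;
    context.Init();

    xn::ImageGenerator image;
    image.Create(context);                        // create an RGB image node
    image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);

    context.StartGeneratingAll();
    context.WaitOneUpdateAll(image);              // block until a frame arrives

    xn::ImageMetaData imageMD;
    image.GetMetaData(imageMD);
    printf("Got a %ux%u RGB frame\n", imageMD.XRes(), imageMD.YRes());

    context.Release();
    return 0;
}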
Example 1: run
void ImageGolographicThread::run(void)
{
    // Create the generator on the heap; run() executes on the worker thread,
    // so the heavy image generation stays off the GUI thread
    ImageGenerator* generator = new ImageGenerator(imageData); //, this);
    generator->loadModel();
    // Forward the generator's progress signal to this thread's slot
    connect(generator, SIGNAL(imageVal(int)), this, SLOT(setPVal(int)));
    resD = generator->generateImages();
    delete generator;
    emit getImagesData(resD);
}
Example 2: main
int main()
{
    FramesTransmitter framesTransmitter;
    ImageGenerator imageGenerator;

    int imageWidth, imageHeight, imageBPP;
    imageGenerator.GetImageParams(imageWidth, imageHeight, imageBPP);
    framesTransmitter.Init("127.0.0.1", "5541", imageWidth, imageHeight, imageBPP);

    //for (int i = 0; i < 100; ++i)
    //{
    //    unsigned char* buff = imageGenerator.GenerateImage();
    //    framesTransmitter.Transmit(buff);
    //    // save image on HDD
    //    bitmap_image image(imageWidth, imageHeight);
    //    int sizeToCopy = imageWidth * imageHeight * imageBPP / 8;
    //    std::copy(buff, buff + sizeToCopy, image.data());
    //    char filePath[200];
    //    sprintf(filePath, "D:/eclipse_workspace/FramesTransmitter/FramesToSend/img%04d.bmp", i);
    //    image.save_image(filePath);
    //}

    int frameCounter = 0;
    int channels = imageBPP / 8;
    IplImage* image = cvCreateImage(cvSize(imageWidth, imageHeight), IPL_DEPTH_8U, channels);

    while (cvWaitKey(40) != 'q') // quit on 'q'
    {
        // Generate a frame and copy it into the OpenCV image buffer
        unsigned char* buff = imageGenerator.GenerateImage();
        int sizeToCopy = imageWidth * imageHeight * channels;
        std::copy(buff, buff + sizeToCopy, image->imageData);

        // Overlay the frame counter and show the result
        char str[100];
        sprintf(str, "[%04d]", frameCounter++);
        CvFont font;
        cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8);
        cvPutText(image, str, cvPoint(10, 40), &font, cvScalar(255, 255, 255));
        cvShowImage("Display window", image);

        // Send the annotated frame over the network
        framesTransmitter.Transmit((unsigned char*)image->imageData);
    }

    cvDestroyWindow("Display window");
    cvReleaseImage(&image);
    return 0;
}
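The loop above uses OpenCV's legacy C API (IplImage, cvShowImage). For comparison, here is a sketch of the same display loop with the C++ API; it reuses the imageGenerator and framesTransmitter objects from the example and assumes the buffer returned by GenerateImage() is tightly packed with imageBPP/8 channels.

// Sketch: the display loop rewritten with the OpenCV C++ API.
// Requires <opencv2/opencv.hpp> and <cstring>.
cv::Mat frame(imageHeight, imageWidth, CV_8UC(imageBPP / 8));
int frameCounter = 0;
while (cv::waitKey(40) != 'q')
{
    unsigned char* buff = imageGenerator.GenerateImage();
    std::memcpy(frame.data, buff, frame.total() * frame.elemSize());
    cv::putText(frame, cv::format("[%04d]", frameCounter++),
                cv::Point(10, 40), cv::FONT_HERSHEY_SIMPLEX, 1.0,
                cv::Scalar(255, 255, 255), 2);
    cv::imshow("Display window", frame);
    framesTransmitter.Transmit(frame.data);
}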
Example 3: GetColorAndDepthImages
// Gets the colour and depth data from the Kinect sensor.
bool GetColorAndDepthImages(ColorImage& colorImage, DepthImage& depthImage)
{
    XnStatus rc = XN_STATUS_OK;

    // Read a new frame (blocking call)
    rc = deviceContext.WaitAnyUpdateAll();
    if (rc != XN_STATUS_OK)
    {
        /*LOGE("Read failed: %s\n", xnGetStatusString(rc));*/
        throw rc;
    }

    // Get handles to the new data
    static ImageMetaData colorImageMetaData;
    static DepthMetaData depthImageMetaData;
    colorImageGenerator.GetMetaData(colorImageMetaData);
    depthImageGenerator.GetMetaData(depthImageMetaData);

    // Validate the images
    if (!depthImageGenerator.IsValid() || !colorImageGenerator.IsValid())
    {
        /*LOGE("Error: Color or depth image is invalid.");*/
        throw 1;
    }

    // Skip frames that are not newer than the last one handled
    if (colorImageMetaData.Timestamp() <= mostRecentRGB)
        return false;

    // Fetch pointers to the data
    const XnRGB24Pixel* pColorImage = colorImageMetaData.RGB24Data(); // g_depth.GetRGB24ImageMap()
    const XnDepthPixel* pDepthImage = depthImageMetaData.Data();      // g_depth.GetDepthMap()

    // Copy the data into the output buffers
    memcpy(colorImage.data, pColorImage, sizeof(colorImage.data));
    memcpy(depthImage.data, pDepthImage, sizeof(depthImage.data));
    colorImage.rows = colorImage.maxRows;
    colorImage.cols = colorImage.maxCols;
    depthImage.rows = depthImage.maxRows;
    depthImage.cols = depthImage.maxCols;

    mostRecentRGB = colorImageMetaData.Timestamp();
    return true;
}
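The memcpy calls above only work if ColorImage::data and DepthImage::data are fixed-size arrays (sizeof applied to a pointer would copy just a few bytes). The struct definitions are not part of this example; a hypothetical sketch, assuming a 640x480 sensor resolution, could look like this:

// Hypothetical container types for the example above (an assumption,
// not the original declarations): fixed 640x480 buffers so that
// sizeof(colorImage.data) yields the full buffer size.
struct ColorImage
{
    static const int maxRows = 480;
    static const int maxCols = 640;
    XnRGB24Pixel data[maxRows * maxCols];
    int rows;
    int cols;
};

struct DepthImage
{
    static const int maxRows = 480;
    static const int maxCols = 640;
    XnDepthPixel data[maxRows * maxCols];
    int rows;
    int cols;
};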
Example 4: glutDisplay
void glutDisplay(void)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();

    SceneMetaData sceneMD;
    DepthMetaData depthMD;
    ImageMetaData imageMD;

    g_DepthGenerator.GetMetaData(depthMD);
    glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
    glDisable(GL_TEXTURE_2D);

    //XnStatus rc = g_Context.WaitOneUpdateAll(g_DepthGenerator);
    XnStatus rc = g_Context.WaitAnyUpdateAll();
    CHECK_RC("Wait Data", rc);

    g_DepthGenerator.GetMetaData(depthMD);
    if (g_UserGenerator.IsValid())
        g_UserGenerator.GetUserPixels(0, sceneMD);
    g_ImageGenerator.GetMetaData(imageMD);

    DrawDepthMap(depthMD, sceneMD);
    DrawImage(imageMD);

    glutSwapBuffers();
} // glutDisplay
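glutDisplay is only the per-frame callback; a minimal sketch of how it is usually registered with GLUT (after the OpenNI context and generators have been initialised) might be:

// Sketch: hooking glutDisplay into the GLUT main loop.
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(640, 480);
glutCreateWindow("ImageGenerator viewer");
glutDisplayFunc(glutDisplay);
glutIdleFunc(glutDisplay);   // keep redrawing so new frames are shown
glutMainLoop();              // never returns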
Example 5: generateFrame
// -----------------------------------------------------------------------------------------------------
// generateFrame
// -----------------------------------------------------------------------------------------------------
bool CameraDevice::generateFrame(IplImage* imgRGB, IplImage* imgDepth)
{
    XnStatus nRetVal = XN_STATUS_OK;
    const XnDepthPixel* pDepthMap = NULL;
    const XnRGB24Pixel* pImageMap = NULL;

    xnFPSMarkFrame(&g_xnFPS);
    nRetVal = g_context.WaitAndUpdateAll();
    if (nRetVal == XN_STATUS_OK)
    {
        g_depth.GetMetaData(g_depthMD);
        g_image.GetMetaData(g_imageMD);
        pDepthMap = g_depthMD.Data();
        pImageMap = g_image.GetRGB24ImageMap();

        printf("Frame %02d (%dx%d) Depth at middle point: %u. FPS: %f\r",
               g_depthMD.FrameID(),
               g_depthMD.XRes(),
               g_depthMD.YRes(),
               g_depthMD(g_depthMD.XRes() / 2, g_depthMD.YRes() / 2),
               xnFPSCalc(&g_xnFPS));

        // convert to OpenCV buffers
        convertImageRGB(pImageMap, imgRGB);
        convertImageDepth(pDepthMap, imgDepth);
        return true;
    }
    return false;
}
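convertImageRGB and convertImageDepth are helpers of CameraDevice that this snippet does not include. As an illustration only, one plausible implementation of the RGB conversion, assuming imgRGB is an 8-bit, 3-channel BGR IplImage of the same resolution as the image map, is:

// Hypothetical sketch of convertImageRGB: copy an XnRGB24Pixel map into
// a BGR IplImage (not the original implementation).
void convertImageRGB(const XnRGB24Pixel* pImageMap, IplImage* imgRGB)
{
    for (int y = 0; y < imgRGB->height; ++y)
    {
        unsigned char* row = (unsigned char*)imgRGB->imageData + y * imgRGB->widthStep;
        for (int x = 0; x < imgRGB->width; ++x, ++pImageMap)
        {
            row[3 * x + 0] = pImageMap->nBlue;   // OpenCV expects BGR order
            row[3 * x + 1] = pImageMap->nGreen;
            row[3 * x + 2] = pImageMap->nRed;
        }
    }
}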
Example 6: seekFrame
void seekFrame(int nDiff)
{
    XnStatus nRetVal = XN_STATUS_OK;
    if (isPlayerOn())
    {
        const XnChar* strNodeName = NULL;
        if (g_pPrimary != NULL)
        {
            strNodeName = g_pPrimary->GetName();
        }
        else if (g_Depth.IsValid())
        {
            strNodeName = g_Depth.GetName();
        }
        else if (g_Image.IsValid())
        {
            strNodeName = g_Image.GetName();
        }
        else if (g_IR.IsValid())
        {
            strNodeName = g_IR.GetName();
        }
        else if (g_Audio.IsValid())
        {
            strNodeName = g_Audio.GetName();
        }

        nRetVal = g_Player.SeekToFrame(strNodeName, nDiff, XN_PLAYER_SEEK_CUR);
        if (nRetVal != XN_STATUS_OK)
        {
            displayMessage("Failed to seek: %s", xnGetStatusString(nRetVal));
            return;
        }

        XnUInt32 nFrame = 0;
        XnUInt32 nNumFrames = 0;
        nRetVal = g_Player.TellFrame(strNodeName, nFrame);
        if (nRetVal != XN_STATUS_OK)
        {
            displayMessage("Failed to tell frame: %s", xnGetStatusString(nRetVal));
            return;
        }

        nRetVal = g_Player.GetNumFrames(strNodeName, nNumFrames);
        if (nRetVal != XN_STATUS_OK)
        {
            displayMessage("Failed to get number of frames: %s", xnGetStatusString(nRetVal));
            return;
        }

        displayMessage("Seeked %s to frame %u/%u", strNodeName, nFrame, nNumFrames);
    }
}
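seekFrame is normally driven from a keyboard handler while playing back a recorded .oni file. A possible GLUT handler (an illustration only, not part of the original sample) could be:

// Sketch: stepping through a recording with the arrow keys.
void onSpecialKey(int key, int /*x*/, int /*y*/)
{
    switch (key)
    {
    case GLUT_KEY_RIGHT: seekFrame(+1);  break;   // one frame forward
    case GLUT_KEY_LEFT:  seekFrame(-1);  break;   // one frame back
    case GLUT_KEY_UP:    seekFrame(+10); break;
    case GLUT_KEY_DOWN:  seekFrame(-10); break;
    }
}
// registered once at startup with: glutSpecialFunc(onSpecialKey);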
Example 7: kinectUpdate
// Updates to the latest image obtained from the Kinect
int kinectUpdate(void)
{
    XnStatus nRetVal = context.WaitAndUpdateAll();
    g_image.GetMetaData(g_imageMD);
    //nRetVal = context.WaitOneUpdateAll(depth);
    depth.GetMetaData(depthMD);
    return nRetVal;
}
Example 8: closeDevice
void closeDevice()
{
    // Release all production nodes before releasing the context itself
    g_Player.Release();
    g_Device.Release();
    g_Depth.Release();
    g_Image.Release();
    g_IR.Release();
    g_Audio.Release();
    g_scriptNode.Release();
    g_Context.Release();
}
Example 9: CleanUpExit
void CleanUpExit()
{
    recorder.Release();
    g_player.Release();
    g_image.Release();
    g_scriptNode.Release();
    g_context.Release();
    g_hands.Release();
    g_gesture.Release();
    free(g_pTexMap);
    exit(1);
}
Example 10: readFrame
void readFrame()
{
    // Nothing to read if no generator is active
    if (!g_Depth.IsValid() && !g_Image.IsValid() && !g_IR.IsValid() && !g_Audio.IsValid())
        return;

    XnStatus rc = XN_STATUS_OK;
    if (g_pPrimary != NULL)
    {
        rc = g_Context.WaitOneUpdateAll(*g_pPrimary);
    }
    else
    {
        rc = g_Context.WaitAnyUpdateAll();
    }

    if (rc != XN_STATUS_OK)
    {
        printf("Error: %s\n", xnGetStatusString(rc));
    }

    if (g_Depth.IsValid())
    {
        g_Depth.GetMetaData(g_DepthMD);
    }
    if (g_Image.IsValid())
    {
        g_Image.GetMetaData(g_ImageMD);
    }
    if (g_IR.IsValid())
    {
        g_IR.GetMetaData(g_irMD);
    }
    if (g_Audio.IsValid())
    {
        g_Audio.GetMetaData(g_AudioMD);
    }
}
Example 11: kinectInit
// Set up OpenNI to obtain RGB24 images and depth maps from the Kinect
// (the 8-bit grayscale pixel format is left commented out below)
int kinectInit(void)
{
    XnStatus nRetVal = XN_STATUS_OK;
    ScriptNode scriptNode;
    EnumerationErrors errors;

    printf("Reading config from: '%s'\n", SAMPLE_XML_PATH_LOCAL);
    nRetVal = context.InitFromXmlFile(SAMPLE_XML_PATH_LOCAL, scriptNode, &errors);

    nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
    //g_image.SetPixelFormat(XN_PIXEL_FORMAT_GRAYSCALE_8_BIT);
    g_image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);
    g_image.GetMetaData(g_imageMD);

    nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
    depth.GetMetaData(depthMD);
    // nRetVal = depth.GetAlternativeViewPointCap().SetViewPoint(g_image);
    //nRetVal = depth.GetFrameSyncCap().FrameSyncWith(g_image);
    return nRetVal;
}
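Note that kinectInit ignores most return codes, so initialisation failures only surface later. A sketch of the usual OpenNI status-checking pattern follows; the CHECK_RC macro seen in Example 4 is one common form of it, and this particular definition is an assumption rather than the original one.

// Sketch: status checking as commonly done in OpenNI samples.
#define CHECK_RC(what, rc)                                          \
    if ((rc) != XN_STATUS_OK)                                       \
    {                                                               \
        printf("%s failed: %s\n", what, xnGetStatusString(rc));     \
    }

nRetVal = context.InitFromXmlFile(SAMPLE_XML_PATH_LOCAL, scriptNode, &errors);
if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
{
    XnChar strError[1024];
    errors.ToString(strError, 1024);     // lists which nodes failed to create
    printf("%s\n", strError);
    return nRetVal;
}
CHECK_RC("InitFromXmlFile", nRetVal);

nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
CHECK_RC("Find image node", nRetVal);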
Example 12: captureRGB
void captureRGB(unsigned char* g_ucImageBuffer)
{
    ImageMetaData imd;
    _image.GetMetaData(imd);

    XnUInt16 g_nXRes = imd.XRes();
    XnUInt16 g_nYRes = imd.YRes();
    const XnRGB24Pixel* pImageMap = _image.GetRGB24ImageMap();

    // Repack RGB24 into a 4-byte-per-pixel buffer in B, G, R, 0 order
    for (unsigned int nY = 0; nY < g_nYRes; nY++)
    {
        for (unsigned int nX = 0; nX < g_nXRes; nX++)
        {
            g_ucImageBuffer[(nY * g_nXRes + nX) * 4 + 0] = pImageMap[nY * g_nXRes + nX].nBlue;
            g_ucImageBuffer[(nY * g_nXRes + nX) * 4 + 1] = pImageMap[nY * g_nXRes + nX].nGreen;
            g_ucImageBuffer[(nY * g_nXRes + nX) * 4 + 2] = pImageMap[nY * g_nXRes + nX].nRed;
            g_ucImageBuffer[(nY * g_nXRes + nX) * 4 + 3] = 0x00;
        }
    }
}
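captureRGB fills the buffer with 4 bytes per pixel in B, G, R, 0 order, which matches OpenCV's CV_8UC4 layout. If OpenCV is available, the buffer can therefore be viewed without copying; the sketch below assumes the usual 640x480 resolution and a correspondingly sized g_ucImageBuffer.

// Sketch: viewing the BGRX buffer without copying (resolution assumed).
cv::Mat view(480, 640, CV_8UC4, g_ucImageBuffer);   // wraps, does not own
cv::imshow("rgb", view);
cv::waitKey(1);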
Example 13: changeRegistration
void changeRegistration(int nValue)
{
    if (!g_Depth.IsValid() || !g_Depth.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
    {
        return;
    }

    if (!nValue)
    {
        // Registration off: depth reverts to its own viewpoint
        g_Depth.GetAlternativeViewPointCap().ResetViewPoint();
    }
    else if (g_Image.IsValid())
    {
        // Registration on: align the depth map with the RGB image
        g_Depth.GetAlternativeViewPointCap().SetViewPoint(g_Image);
    }
}
Example 14: prepare
XnStatus prepare(char useScene, char useDepth, char useImage, char useIr, char useHistogram)
{
    //TODO handle possible failures! Gotcha!
    if (useDepth)
    {
        mDepthGen.GetMetaData(depthMD);
        nXRes = depthMD.XRes();
        nYRes = depthMD.YRes();
        pDepth = depthMD.Data();

        if (useHistogram)
        {
            calcHist();
            // rewind the pointer
            pDepth = depthMD.Data();
        }
    }
    if (useScene)
    {
        mUserGen.GetUserPixels(0, sceneMD);
        nXRes = sceneMD.XRes();
        nYRes = sceneMD.YRes();
        pLabels = sceneMD.Data();
    }
    if (useImage)
    {
        mImageGen.GetMetaData(imageMD);
        nXRes = imageMD.XRes();
        nYRes = imageMD.YRes();
        pRGB = imageMD.RGB24Data();
        // HISTOGRAM?????
    }
    if (useIr)
    {
        mIrGen.GetMetaData(irMD);
        nXRes = irMD.XRes();
        nYRes = irMD.YRes();
        pIR = irMD.Data();
        // HISTOGRAM????
    }
    return XN_STATUS_OK;   // the declared return type is XnStatus
}
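calcHist is not shown above. In the OpenNI samples this name usually refers to a cumulative depth histogram used to map raw depth values to display intensities; the sketch below is written under that assumption, with pDepth, nXRes and nYRes as in prepare() and MAX_DEPTH plus the histogram array introduced here for illustration.

// Sketch of a cumulative depth histogram (an assumption about calcHist).
const int MAX_DEPTH = 10000;          // Kinect depth values are in millimetres
float pHistogram[MAX_DEPTH];

void calcHist()
{
    memset(pHistogram, 0, sizeof(pHistogram));

    unsigned int nPoints = 0;
    const XnDepthPixel* p = pDepth;
    for (unsigned int y = 0; y < nYRes; ++y)
        for (unsigned int x = 0; x < nXRes; ++x, ++p)
            if (*p != 0)
            {
                pHistogram[*p]++;
                nPoints++;
            }

    for (int i = 1; i < MAX_DEPTH; ++i)      // cumulative sum
        pHistogram[i] += pHistogram[i - 1];

    if (nPoints != 0)
        for (int i = 1; i < MAX_DEPTH; ++i)  // nearer pixels become brighter
            pHistogram[i] = 256.0f * (1.0f - pHistogram[i] / nPoints);
}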
Example 15: takePhoto
void takePhoto()
{
    static int index = 1;
    char fname[256] = {0};
    sprintf(fname, "kinect%03d.txt", index++);

    g_depth.GetMetaData(g_depthMD);
    g_image.GetMetaData(g_imageMD);

    int const nx = g_depthMD.XRes();
    int const ny = g_depthMD.YRes();
    assert(nx == g_imageMD.XRes());
    assert(ny == g_imageMD.YRes());

    const XnDepthPixel* pDepth = g_depthMD.Data();
    const XnUInt8* pImage = g_imageMD.Data();

    FILE* file = fopen(fname, "wb");
    fprintf(file, "%d\n%d\n\n", nx, ny);
    for (int y = 0, di = 0, ri = 0, gi = 1, bi = 2; y < ny; y++)
    {
        for (int x = 0; x < nx; x++, di++, ri += 3, gi += 3, bi += 3)
        {
            int const r = pImage[ri];
            int const g = pImage[gi];
            int const b = pImage[bi];
            int const d = pDepth[di];
            assert(r >= 0);
            assert(g >= 0);
            assert(b >= 0);
            assert(d >= 0);
            assert(r <= 0xFF);
            assert(g <= 0xFF);
            assert(b <= 0xFF);
            assert(d <= 0xFFFF);
            fprintf(file, "%3d %3d %3d %5d\n", r, g, b, d);
        }
        fprintf(file, "\n");
    }
    fflush(file);
    fclose(file);
}
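The dump written by takePhoto is plain text: the width and height on the first two lines, then one "r g b d" record per pixel in row-major order. A matching reader is straightforward; for example:

// Sketch: reading a kinectNNN.txt dump produced by takePhoto above.
#include <cstdio>
#include <vector>

bool readPhoto(const char* fname,
               std::vector<unsigned char>& rgb,
               std::vector<unsigned short>& depth,
               int& nx, int& ny)
{
    FILE* file = fopen(fname, "r");
    if (!file) return false;
    if (fscanf(file, "%d %d", &nx, &ny) != 2) { fclose(file); return false; }

    rgb.resize(nx * ny * 3);
    depth.resize(nx * ny);
    for (int i = 0; i < nx * ny; ++i)
    {
        int r, g, b, d;
        if (fscanf(file, "%d %d %d %d", &r, &g, &b, &d) != 4) { fclose(file); return false; }
        rgb[3 * i + 0] = (unsigned char)r;
        rgb[3 * i + 1] = (unsigned char)g;
        rgb[3 * i + 2] = (unsigned char)b;
        depth[i]       = (unsigned short)d;
    }
    fclose(file);
    return true;
}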