本文整理汇总了C++中cvShowImage函数的典型用法代码示例。如果您正苦于以下问题:C++ cvShowImage函数的具体用法?C++ cvShowImage怎么用?C++ cvShowImage使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvShowImage函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
/* Example 1: tracker-evaluation harness.
 * argv[1] = ground-truth image, argv[2] = ground-truth rect file (CSV "x,y,w,h"),
 * argv[3] = desired capture FPS (validated to [1,60]).
 * Shows the ground-truth bounding box, then prepares a GoPro camera for video
 * capture over HTTP (WinHTTP, camera access point at 10.5.5.9).
 * NOTE(review): this function is truncated in the listing. */
int main(int argc, char **argv) {
    // Prepare log file and check argument count
    // NOTE(review): the fopen result is never checked; fprintf on a NULL
    // FILE* is undefined behaviour if the log cannot be opened.
    FILE* log_file = fopen("track_results.log","a");
    if(argc != 4) {
        fprintf(log_file, "Incorrect number of arguments.\n");
        return 1;
    }
    int desired_fps = atoi(argv[3]);
    if(desired_fps > 60 || desired_fps < 1) {
        fprintf(log_file, "Invalid FPS: please select a value in the range [1-60].\n");
        return 1;
    }
    ////////// GROUND TRUTH SETUP AND PROCESSING //////////
    // Open and extract bounding rect info from gt file
    char buffer[100];
    memset(buffer, 0, sizeof(buffer));
    int gt_rect[4]; // x, y, width, height of the ground-truth box
    // NOTE(review): gt_file / fgets / strtok results are unchecked; a missing
    // or malformed file makes strtok/atoi dereference NULL.
    FILE* gt_file = fopen(argv[2], "r");
    fgets(buffer, 100, gt_file);
    char* token = strtok(buffer, ",");
    gt_rect[0] = atoi(token);
    int i = 1;
    while(i < 4) {
        token = strtok(NULL, ",");
        gt_rect[i] = atoi(token);
        i++;
    }
    fclose(gt_file);
    // Load image and compress to a reasonable size
    IplImage* gt = cvLoadImage(argv[1]);
    IplImage* gt_resized = cvCreateImage(cvSize(320, 240), gt->depth, gt->nChannels); //1280,720
    cvResize(gt, gt_resized, CV_INTER_NN);
    // Show bounding rect (red, 2 px) on the resized ground-truth image
    CvPoint corner1 = cvPoint(gt_rect[0], gt_rect[1]);
    CvPoint corner2 = cvPoint(gt_rect[0] + gt_rect[2], gt_rect[1] + gt_rect[3]);
    CvScalar rect_color = CV_RGB(255,0,0);
    cvRectangle(gt_resized, corner1, corner2, rect_color, 2);
    cvNamedWindow( "Ground Truth Reference", CV_WINDOW_AUTOSIZE );
    cvShowImage( "Ground Truth Reference", gt_resized );
    // Set ROI for ground truth so later operations touch only the target region
    CvRect quarter = cvRect(gt_rect[0], gt_rect[1], gt_rect[2], gt_rect[3]);
    cvSetImageROI(gt_resized, quarter);
    ////////// PREPARE GOPRO FOR VIDEO CAPTURE //////////
    // Basic connectivity tests
    HINTERNET hSession = WinHttpOpen( L"GoPro HTTP Transfer/1.1",
                                      WINHTTP_ACCESS_TYPE_DEFAULT_PROXY,
                                      WINHTTP_NO_PROXY_NAME,
                                      WINHTTP_NO_PROXY_BYPASS, 0 );
    if(hSession == NULL) {
        printf("Error %u in WinHttpOpen.\n", GetLastError());
        std::cin.get();
        return 1;
    }
    // 10-second timeouts for resolve/connect/send/receive
    if( !WinHttpSetTimeouts( hSession, 10000, 10000, 10000, 10000 )) {
        printf( "Error %u in WinHttpSetTimeouts.\n", GetLastError());
        std::cin.get();
        return 1;
    }
    // 10.5.5.9 is the GoPro's fixed access-point address
    HINTERNET hConnect = WinHttpConnect( hSession, L"10.5.5.9", 80, 0);
    if(hConnect == NULL) {
        printf("Error %u in WinHttpConnect.\n", GetLastError());
        std::cin.get();
        return 1;
    }
    // Power on the camera (bacpac power command); ping_request is defined elsewhere
    bool error = ping_request(hConnect, L"/bacpac/PW?t=goprohero&p=%01");
    if(error) {
        return 1;
    }
    Sleep(5000); //give time to boot up
    //Clear memory (delete-all command)
    error = ping_request(hConnect, L"/camera/DA?t=goprohero");
    if(error) {
        return 1;
    }
    //......... remainder of this example omitted in the listing .........
示例2: main_plate
/* Example 2: licence-plate recognition pipeline for a single car photo.
 * car_name: path of the input car image.
 * Crops the car region, normalises the image size, preprocesses it, locates
 * the plate, and cuts out the plate image; intermediate results are passed
 * between stages as files under "image/".
 * NOTE(review): this function is truncated in the listing. */
int main_plate(const char * car_name)
{
    /********************************* preparation *********************************/
    IplImage * img_car = NULL;
    IplImage * img_car_after_resize = NULL;
    IplImage * img_after_preprocess = NULL;
    IplImage * img_plate = NULL;
    IplImage * img_after_resize = NULL;
    IplImage * img_character = NULL;
    List rects; /* list of candidate plate-location rectangles */
    double scale = -1; /* used during size normalisation */
    int width = 0, height = 0; /* width/height for the initial size normalisation */
    int number = -1; /* numeric result of the last character */
    int count_recog = 0;
    char filename[50];
#if 1
    //cvNamedWindow("img_car", 1);
    // cvNamedWindow("img_car_after_resize", 1);
    //cvNamedWindow("img_after_preprocess", 1);
    //cvNamedWindow("img_plate", 1);
#endif
    if ((img_car = cvLoadImage(car_name, -1)) == NULL) {
        fprintf(stderr, "Can not open car image file in main.c!\n");
        exit(-1);
    }
    /**************************** start image processing ****************************/
    /* The car occupies only a small fraction of the captured photo, so re-crop the
       image so that the car body fills most of it. Observation: the car is almost
       always near the centre of the photo, so we crop the central region.
       Strategy:
       1. split the width into thirds and keep the middle third — the plate must be there
       2. drop the top quarter and the bottom quarter — the plate is in the remaining half
    */
    /********* crop the car body *********/
#if 0
    IplImage * tmp_img = cvCreateImage(cvSize(1.0 / 3 * img_car->width, 1.0 / 2 * img_car->height), img_car->depth, img_car->nChannels);
    cvSetImageROI(img_car, cvRect(1.0 / 3 * img_car->width, 1.0 / 4 * img_car->height, 1.0 / 3 * img_car->width, 1.0 / 2 * img_car->height));
    cvCopy(img_car, tmp_img);
    cvSaveImage("tmp_img.bmp", tmp_img);
    cvResetImageROI(img_car);
    img_car = cvLoadImage("tmp_img.bmp", -1); /* img_car is now the cropped image */
    assert(img_car != NULL);
    cvNamedWindow("haha", 1);
    cvShowImage("haha", tmp_img);
    cvWaitKey(0);
#endif
    cut_image(img_car);
    // NOTE(review): the original img_car loaded above is leaked by this reassignment.
    img_car = cvLoadImage("image/tmp_img.bmp", -1); /* img_car is now the cropped image */
    /*******************************************************************************/
    /* To process images uniformly, normalise the image size first so that it is
       roughly 640*480; we only need to resize approximately to this ratio.
    */
    /* a plain cvResize is sufficient */
#if 1
    scale = 1.0 * 640 / img_car->width; /* normalise the width to 640 and scale the height proportionally */
    width = scale * img_car->width;
    height = scale * img_car->height;
    img_car_after_resize = cvCreateImage(cvSize(width, height), img_car->depth, img_car->nChannels);
    cvResize(img_car, img_car_after_resize); /* size normalisation: image is now 640 wide */
    cvSaveImage("image/img_car_after_resize.bmp", img_car_after_resize);
#endif
    /* preprocessing: input is the size-normalised image, output is img_after_preprocess.bmp */
    preprocess_car_img(img_car_after_resize);
    /* read back img_after_preprocess.bmp */
    if ((img_after_preprocess = cvLoadImage("image/img_after_preprocess.bmp", -1)) == NULL) {
        fprintf(stderr, "Can not open file img_after_preprocess.bmp in main.c");
        exit(-1);
    }
#if 1
    /* display the preprocessed image (disabled) */
    //cvShowImage("img_car", img_after_preprocess);
    //cvShowImage("img_after_preprocess", img_after_preprocess);
#endif
    /************** preprocessing done — locate the plate position ****************/
    rects = get_location(img_after_preprocess, img_car_after_resize); /* plate location; the design allowed several candidates, but in practice rects holds exactly one, still stored in a list */
    /* get_location already returns the node after the list head, so rects (not rects->next) is used */
    assert(count_node(rects) == 1); /* assert the list holds exactly one candidate plate location */
    /**************** plate located — cut out the plate image *********************/
    get_plate_image(img_car_after_resize, rects); /* produce the plate image */
    img_plate = cvLoadImage("image/plate_img0.bmp", -1); /* the plate_img.bmp image written by the call above */
    if (img_plate == NULL) {
        fprintf(stderr, "Can not open plate image file!\n");
        exit(-1);
    }
    //......... remainder of this example omitted in the listing .........
示例3: main
int
main (int argc, char **argv)
{
int i;
int gui = true;
IplImage *src_img = 0, *src_gray = 0;
const char *cascade_name = "haarcascade_frontalface_default.xml";
CvHaarClassifierCascade *cascade = 0;
CvMemStorage *storage = 0;
CvSeq *faces;
static CvScalar colors[] = {
{{0, 0, 255}}, {{0, 128, 255}},
{{0, 255, 255}}, {{0, 255, 0}},
{{255, 128, 0}}, {{255, 255, 0}},
{{255, 0, 0}}, {{255, 0, 255}}
};
// (1)画像を読み込む
if (argc < 2 || (src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_COLOR)) == 0)
return -1;
if (argc == 3 && strcmp("--no-gui", argv[2]) == 0 )
gui = false;
src_gray = cvCreateImage (cvGetSize (src_img), IPL_DEPTH_8U, 1);
// (2)ブーストされた分類器のカスケードを読み込む
cascade = (CvHaarClassifierCascade *) cvLoad (cascade_name, 0, 0, 0);
// (3)メモリを確保し,読み込んだ画像のグレースケール化,ヒストグラムの均一化を行う
storage = cvCreateMemStorage (0);
cvClearMemStorage (storage);
cvCvtColor (src_img, src_gray, CV_BGR2GRAY);
cvEqualizeHist (src_gray, src_gray);
// (4)物体(顔)検出
faces = cvHaarDetectObjects (src_gray, cascade, storage, 1.11, 4, 0, cvSize (40, 40));
// puts("<faces>");
printf("[{\"faces\":");
// (5)検出された全ての顔位置に,円を描画する
for (i = 0; i < (faces ? faces->total : 0); i++) {
// puts(" <face>");
CvRect *r = (CvRect *) cvGetSeqElem (faces, i);
CvPoint center;
int radius;
// printf(" <top>%d</top>\n", r->y);
// printf(" <right>%d</right>\n", r->x + r->width);
// printf(" <bottom>%d</bottom>\n", r->y + r->height);
// printf(" <left>%d</left>\n", r->x);
puts("[{");
printf(" \"id\":%d,\n", 0);
printf(" \"x\":%d,\n", r->x);
printf(" \"y\":%d,\n", r->y);
printf(" \"w\":%d,\n", r->width);
printf(" \"h\":%d\n", r->height);
puts("}]");
if (i != faces->total - 1) {
puts(",");
}
center.x = cvRound (r->x + r->width * 0.5);
center.y = cvRound (r->y + r->height * 0.5);
radius = cvRound ((r->width + r->height) * 0.25);
cvCircle (src_img, center, radius, colors[i % 8], 3, 8, 0);
// puts(" </face>");
}
// puts("</faces>");
puts("}]");
// (6)画像を表示,キーが押されたときに終了
if (gui) {
cvNamedWindow ("Face Detection", CV_WINDOW_AUTOSIZE);
cvShowImage ("Face Detection", src_img);
cvWaitKey (0);
}
cvDestroyWindow ("Face Detection");
cvReleaseImage (&src_img);
cvReleaseImage (&src_gray);
cvReleaseMemStorage (&storage);
return 0;
}
示例4: detect_and_draw
/* Detects faces in 'img' with the file-global Haar 'cascade', draws blue
 * rectangles on copies of the colour and depth frames, resizes the face's
 * depth patch into 'faceDepth'/'faceDepthRet' (8-bit for the latter) and,
 * when 'save' is set, dumps the raw 16-bit face depth values to "face.csv".
 * NOTE(review): truncated in this listing — temp/depthTemp/faceDepth are not
 * released in the visible portion, and faceDepthTemp2 is never released at
 * all (one leak per detected face). */
void detect_and_draw(IplImage * img, IplImage * depth, IplImage *faceDepthRet, bool save)
{
    int scale = 1;
    // Create a new image based on the input image
    // NOTE(review): the memcpy sizes hard-code 640x480x3 and 640x480x2 even
    // though the allocations use img->width/height — confirm inputs are VGA.
    IplImage *temp =
        cvCreateImage(cvSize(img->width / scale, img->height / scale), 8, 3);
    memcpy(temp->imageData, img->imageData, 640 * 480 * 3);
    IplImage *depthTemp =
        cvCreateImage(cvSize(img->width / scale, img->height / scale), 16, 1);
    memcpy(depthTemp->imageData, depth->imageData, 640 * 480 * 2);
    IplImage *faceDepth =
        cvCreateImage(cvSize(img->width / scale, img->height / scale), 16, 1);
    // Create two points to represent the face locations
    CvPoint pt1, pt2;
    int i, j, k;
    // Clear the memory storage which was used before
    cvClearMemStorage(storage);
    // Find whether the cascade is loaded, to find the faces. If yes, then:
    if (cascade)
    {
        // There can be more than one face in an image. So create a growable
        // sequence of faces.
        // Detect the objects and store them in the sequence
        /* CvSeq* faces = cvHaarDetectObjects( temp, cascade, storage, 1.1, 2,
           CV_HAAR_DO_CANNY_PRUNING, cvSize(40, 40) ); */
        CvSeq *faces = cvHaarDetectObjects(temp, cascade, storage,
                                           1.6, 2, CV_HAAR_DO_CANNY_PRUNING,
                                           cvSize(40, 40));
        // Loop the number of faces found.
        for (i = 0; i < (faces ? faces->total : 0); i++)
        {
            // Create a new rectangle for drawing the face
            CvRect *r = (CvRect *) cvGetSeqElem(faces, i);
            // Find the dimensions of the face,and scale it if necessary
            pt1.x = r->x * scale;
            pt2.x = (r->x + r->width) * scale;
            pt1.y = r->y * scale;
            pt2.y = (r->y + r->height) * scale;
            // Draw the rectangle in the input image
            cvRectangle(temp, pt1, pt2, CV_RGB(0, 0, 255), 3, 8, 0);
            cvRectangle(depthTemp, pt1, pt2, CV_RGB(0, 0, 255), 3, 8, 0);
            // Restrict the depth frame to the face region while copying it out
            cvSetImageROI(depth,
                          cvRect(pt1.x, pt1.y, r->width * scale,
                                 r->height * scale));
            IplImage *faceDepthTemp =
                cvCreateImage(cvGetSize(depth), depth->depth,
                              depth->nChannels);
            IplImage *faceDepthTemp2 =
                cvCreateImage(cvGetSize(depth), 8,
                              depth->nChannels);
            cvCopy(depth, faceDepthTemp, NULL);
            cvResetImageROI(depth);
            // Maximize standard deviation.
            //stretchFaceDepth(faceDepthTemp);
            cvResize(faceDepthTemp, faceDepth);
            // squeeze 16-bit depth into 8 bits for the caller's return image
            cvConvertScale(faceDepthTemp, faceDepthTemp2, 1.0/256.0, 0);
            cvResize(faceDepthTemp2, faceDepthRet);
            cvReleaseImage(&faceDepthTemp);
            // NOTE(review): faceDepthTemp2 is leaked here.
            if (save)
            {
                // dump the raw 16-bit depth samples of the face box as CSV
                FILE *csvFile = fopen("face.csv", "w");
                for (j = pt1.y; j < pt2.y; j++)
                {
                    for (k = pt1.x; k < pt2.x; k++)
                    {
                        fprintf(csvFile, "%u,",
                                (((uint16_t *) (depth->imageData)) +
                                 j * depth->width)[k]);
                    }
                    fprintf(csvFile, "\n");
                }
                printf("Face captured!\n");
                fclose(csvFile);
            }
        }
    }
    // Show the image in the window named "result"
    cvShowImage("result", temp);
    cvShowImage("resultDepth", depthTemp);
    cvShowImage("faceDepth", faceDepth);
    //......... remainder of this example omitted in the listing .........
示例5: main
            //......... the beginning of this example is omitted in the listing .........
            //LEARNING THE CODEBOOK BACKGROUND
            // update the per-pixel codebook model from the current YUV frame
            pColor = (uchar *)((yuvImage)->imageData);
            for(int c=0; c<imageLen; c++)
            {
                cvupdateCodeBook(pColor, cB[c], cbBounds, nChannels);
                pColor += 3; // 3 interleaved channels per pixel
            }
        }
        //When done, create the background model
        if(i == endcapture){
            createModelsfromStats();
        }
        //Find the foreground if any
        if(i >= endcapture) {
            //FIND FOREGROUND BY AVG METHOD:
            backgroundDiff(rawImage,ImaskAVG);
            cvCopy(ImaskAVG,ImaskAVGCC);
            cvconnectedComponents(ImaskAVGCC);
            //FIND FOREGROUND BY CODEBOOK METHOD
            uchar maskPixelCodeBook;
            pColor = (uchar *)((yuvImage)->imageData); //3 channel yuv image
            uchar *pMask = (uchar *)((ImaskCodeBook)->imageData); //1 channel image
            for(int c=0; c<imageLen; c++)
            {
                maskPixelCodeBook = cvbackgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);
                *pMask++ = maskPixelCodeBook;
                pColor += 3;
            }
            //This part just to visualize bounding boxes and centers if desired
            cvCopy(ImaskCodeBook,ImaskCodeBookCC);
            cvconnectedComponents(ImaskCodeBookCC);
        }
        //Display the raw frame and the four foreground masks
        cvShowImage( "Raw", rawImage );
        cvShowImage( "AVG_ConnectComp",ImaskAVGCC);
        cvShowImage( "ForegroundAVG",ImaskAVG);
        cvShowImage( "ForegroundCodeBook",ImaskCodeBook);
        cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);
        //USER INPUT:
        c = cvWaitKey(10)&0xFF;
        //End processing on ESC, q or Q
        // NOTE(review): "c == 'q' | c == 'Q'" uses bitwise '|' where logical
        // '||' was meant; it happens to work because both operands are 0/1,
        // but the intent is (c == 27 || c == 'q' || c == 'Q').
        if(c == 27 || c == 'q' | c == 'Q')
            break;
        //Else check for user input
        switch(c)
        {
        case 'h':
            help();
            break;
        case 'p':
            // toggle pause
            pause ^= 1;
            break;
        case 's':
            // advance exactly one frame, then pause again
            singlestep = 1;
            pause = false;
            break;
        case 'r':
            // resume free-running playback
            pause = false;
            singlestep = false;
            break;
        //AVG BACKROUND PARAMS
        case '-':
            if(i > endcapture){
                scalehigh += 0.25;
                printf("AVG scalehigh=%f\n",scalehigh);
示例6: zgomot_sare_si_piper
/* Adds impulsive salt-and-pepper noise to roughly 10% of the pixels of a
 * single-channel 8-bit grayscale image and reports the resulting SNR in dB.
 * img: input image (only read; the noisy copy is displayed and saved).
 * Side effects: opens a window, writes "imagine_zgomot.png", blocks in
 * cvWaitKey until a key is pressed. */
void zgomot_sare_si_piper( IplImage *img )
{
    int i, j, k;
    int w, h;
    int nivel_gri;           /* gray level of the current pixel */
    double e_zgomot;         /* noise energy */
    double e_imagine;        /* image (signal) energy */
    double SNR;              /* signal-to-noise ratio, in dB */
    double nr = 0.1 ;        /* fraction of pixels affected by noise (10%) */
    CvScalar pixel;
    CvScalar sare_piper;
    IplImage *imagine_zgomot;
    w = img->width;
    h = img->height;
    imagine_zgomot = cvCreateImage( cvSize( w, h ), IPL_DEPTH_8U, 1 );
    e_imagine = 0;
    e_zgomot = 0;
    SNR = 0;
    /* copy the original image and accumulate its energy (sum of squared levels) */
    for( i = 0; i < h; i++ )
        for( j = 0; j < w; j++ )
        {
            pixel = cvGet2D( img, i, j );
            nivel_gri = (int)( pixel.val[ 0 ] );
            e_imagine += (double)nivel_gri * nivel_gri;
            cvSet2D( imagine_zgomot, i, j, pixel );
        }
    /* NOTE(review): srand(rand()) seeds from the default-seeded generator, so
       the noise pattern is identical on every run; a time-based seed would
       make it vary. Kept to preserve the original behaviour. */
    srand( rand() );
    k = 0;
    while( k < ( int )( w * h * nr ) )
    {
        /* pick the noisy pixel's coordinates at random */
        i = ( int )( 1. * h * rand() / ( RAND_MAX + 1. ) );
        j = ( int )( 1. * w * rand() / ( RAND_MAX + 1. ) );
        if( (i >= 0) && (i < h) && (j >= 0) && (j < w) )
        {
            /* decide salt (255) vs pepper (0) with equal probability */
            if( ( 100. * rand() / ( RAND_MAX + 1. ) ) >= 50 )
            {
                sare_piper.val[ 0 ] = 255;
                pixel = cvGet2D( img, i, j );
                nivel_gri = (int)( pixel.val[ 0 ] );
                e_zgomot += (double)(255 - nivel_gri) * (255 - nivel_gri);
            }
            else
            {
                sare_piper.val[ 0 ] = 0;
                pixel = cvGet2D( img, i, j );
                nivel_gri = (int)( pixel.val[ 0 ] );
                e_zgomot += (double)nivel_gri * nivel_gri;
            }
            cvSet2D( imagine_zgomot, i, j, sare_piper );
            k++;
            /* NOTE(review): a pixel may be drawn twice; its noise energy is
               then counted twice against the original value. */
        }
    }
    /* FIX: SNR in decibels for an energy (power) ratio is 10*log10, not
       20*ln as before; also guard against zero noise energy (e.g. an
       all-black image hit only by pepper noise). */
    SNR = ( e_zgomot > 0 ) ? 10 * log10( e_imagine / e_zgomot ) : 0;
    printf( "Zgomot impulsiv de tip sare si piper, SNR = %6.3lf dB\n", SNR );
    cvNamedWindow( "Imagine + zgomot sare si piper", 1 );
    cvShowImage( "Imagine + zgomot sare si piper", imagine_zgomot );
    cvSaveImage( "imagine_zgomot.png", imagine_zgomot );
    cvWaitKey(0);
    cvReleaseImage( &imagine_zgomot );
    cvDestroyWindow( "Imagine + zgomot sare si piper" );
}
示例7: Run
/* Capture loop for a CLEye (PS3 Eye) camera: grabs frames, converts them to
 * HSV, thresholds a colour range and detects circular blobs (balls) with the
 * Hough transform, displaying the raw, HSV and thresholded streams.
 * Runs until the member flag _running is cleared.
 * NOTE(review): this function is truncated in the listing. */
void Run()
{
    int w, h;
    IplImage *pCapImage;
    PBYTE pCapBuffer = NULL;
    // Create camera instance
    _cam = CLEyeCreateCamera(_cameraGUID, _mode, _resolution, _fps);
    if(_cam == NULL) return;
    // Get camera frame dimensions
    CLEyeCameraGetFrameDimensions(_cam, w, h);
    // Depending on color mode chosen, create the appropriate OpenCV image
    if(_mode == CLEYE_COLOR_PROCESSED || _mode == CLEYE_COLOR_RAW)
        pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 4);
    else
        pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
    // Set some camera parameters
    //CLEyeSetCameraParameter(_cam, CLEYE_GAIN, 30);
    //CLEyeSetCameraParameter(_cam, CLEYE_EXPOSURE, 500);
    //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_EXPOSURE, false);
    //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_GAIN, false);
    //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_WHITEBALANCE, false);
    //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_RED, 100);
    //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_BLUE, 200);
    //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_GREEN, 200);
    // Start capturing
    CLEyeCameraStart(_cam);
    // NOTE(review): this outer 'storage' is shadowed by the one created each
    // iteration below and is never released in the visible code.
    CvMemStorage* storage = cvCreateMemStorage(0);
    IplImage* hsv_frame = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 3);
    IplImage* thresholded = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 1);
    IplImage* temp = cvCreateImage(cvSize(pCapImage->width >> 1, pCapImage->height >> 1), IPL_DEPTH_8U, 3);
    // Create a window in which the captured images will be presented
    cvNamedWindow( "Camera" , CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );
    // HSV threshold bounds for the target colour
    //int hl = 100, hu = 115, sl = 95, su = 135, vl = 115, vu = 200;
    int hl = 5, hu = 75, sl = 40, su = 245, vl = 105, vu = 175;
    // image capturing loop
    while(_running)
    {
        // Detect a red ball
        CvScalar hsv_min = cvScalar(hl, sl, vl, 0);
        CvScalar hsv_max = cvScalar(hu, su, vu, 0);
        cvGetImageRawData(pCapImage, &pCapBuffer);
        CLEyeCameraGetFrame(_cam, pCapBuffer);
        // NOTE(review): this conversion is immediately overwritten by the
        // cvCvtColor below, so it appears redundant.
        cvConvertImage(pCapImage, hsv_frame);
        // Get one frame
        if( !pCapImage )
        {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            break;
        }
        // Convert color space to HSV as it is much easier to filter colors in the HSV color-space.
        cvCvtColor(pCapImage, hsv_frame, CV_RGB2HSV);
        // Filter out colors which are out of range.
        cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
        // Memory for hough circles (released at the bottom of each iteration)
        CvMemStorage* storage = cvCreateMemStorage(0);
        // hough detector works better with some smoothing of the image
        cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 );
        CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 2,
                                        thresholded->height/4, 100, 50, 10, 400);
        // overlay every detected circle: green centre dot + red outline
        for (int i = 0; i < circles->total; i++)
        {
            float* p = (float*)cvGetSeqElem( circles, i );
            //printf("Ball! x=%f y=%f r=%f\n\r",p[0],p[1],p[2] );
            cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])),
                      3, CV_RGB(0,255,0), -1, 8, 0 );
            cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])),
                      cvRound(p[2]), CV_RGB(255,0,0), 3, 8, 0 );
        }
        cvShowImage( "Camera", pCapImage ); // Original stream with detected ball overlay
        cvShowImage( "HSV", hsv_frame); // Original stream in the HSV color space
        cvShowImage( "EdgeDetection", thresholded ); // The stream after color filtering
        cvReleaseMemStorage(&storage);
        //......... remainder of this example omitted in the listing .........
示例8: main
/* Example 8: interactive erosion demo.
 * Repeatedly loads "03.jpg", erodes it by a user-supplied number of
 * iterations (clamped to a safe range), labels and displays the result in
 * the "Eroded" window and saves it as "eroded.jpg".
 * Press 'q' in the image window (or give invalid console input) to quit.
 * Returns 0; exits with status 1 if the image cannot be loaded. */
int main(int argc, char* argv[])
{
    // number of erosion iterations passed to cvErode
    int erode_level = 11;
    char key = 'q';
    // FIX: was char img_text[] = "erode level: " — an exactly-sized array
    // that strcat() then overflowed (undefined behaviour). Use one buffer
    // large enough for the label plus any int, filled with snprintf.
    char img_text[64];
    // image container...
    IplImage* img;
    // Font obj for setting text font...
    CvFont font;
    // initialize font object...
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX,
               1.0, 1.0, 0, 1, CV_AA);
    // starts the window thread...
    // is necessary to destroy window...
    cvStartWindowThread();
    do {
        img = cvLoadImage("03.jpg", CV_LOAD_IMAGE_UNCHANGED);
        if (!img) {
            printf("Error: Could not open the image file! \n");
            exit(1);
        }
        // get erode level from user
        // FIX: check scanf's result — non-numeric input previously left
        // erode_level unchanged and looped forever on the bad token.
        printf("Enter the level of eroding: ");
        if (scanf("%d", &erode_level) != 1) {
            cvReleaseImage(&img);
            break;
        }
        // set limits 0-120
        if (erode_level <= 0)
            erode_level = 1;
        else if (erode_level > 120)
            erode_level = 119;
        // erode the image...
        cvErode(img, img, NULL, erode_level);
        /*
         * Similarly, dilation can be performed...
         * cvDilate(img, img, NULL, erode_level);
         */
        // build the overlay label safely (bounded write)
        snprintf(img_text, sizeof(img_text), "erode level: %d", erode_level);
        // write text to image
        cvPutText(img, img_text, cvPoint(10, 50),
                  &font, cvScalar(255, 255, 255, 0));
        // display image in the "Eroded" window...
        cvNamedWindow("Eroded", CV_WINDOW_AUTOSIZE);
        cvShowImage("Eroded", img);
        // save to eroded.jpg...
        cvSaveImage("eroded.jpg", img, 0);
        // get input from user
        key = cvWaitKey(0);
        // destroy window and release image...
        cvDestroyWindow("Eroded");
        cvReleaseImage( &img );
        // if 'q' is pressed by user (key) break out of loop, else continue...
    } while (key != 'q');
    // end of do...while loop
    return 0;
} // end of main
示例9: cvCheckChessboard
// Fast pre-check for the presence of a chessboard in an image; workaround for
// cvFindChessboardCorners being slow on images that contain no chessboard.
// - src: input image (must be single-channel, 8-bit; enforced via cvError)
// - size: chessboard pattern size (inner corners per row/column)
// Returns 1 if a chessboard may be present and findChessboardCorners should
// be called, 0 if there is no chessboard, -1 in case of error.
int cvCheckChessboard(IplImage* src, CvSize size)
{
    if(src->nChannels > 1)
    {
        cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only",
                __FILE__, __LINE__);
    }
    if(src->depth != 8)
    {
        cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only",
                __FILE__, __LINE__);
    }
    // threshold sweep parameters: levels from black_level to white_level
    const int erosion_count = 1;
    const float black_level = 20.f;
    const float white_level = 130.f;
    const float black_white_gap = 70.f;
#if defined(DEBUG_WINDOWS)
    cvNamedWindow("1", 1);
    cvShowImage("1", src);
    cvWaitKey(0);
#endif //DEBUG_WINDOWS
    // NOTE(review): storage/white/black/thresh are presumably released in the
    // omitted tail of this function (truncated in this listing).
    CvMemStorage* storage = cvCreateMemStorage();
    IplImage* white = cvCloneImage(src);
    IplImage* black = cvCloneImage(src);
    // erode/dilate to emphasise the white and black squares respectively
    cvErode(white, white, NULL, erosion_count);
    cvDilate(black, black, NULL, erosion_count);
    IplImage* thresh = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    int result = 0;
    for(float thresh_level = black_level; thresh_level < white_level && !result; thresh_level += 20.0f)
    {
        // collect quadrangle hypotheses from the white-square image...
        cvThreshold(white, thresh, thresh_level + black_white_gap, 255, CV_THRESH_BINARY);
#if defined(DEBUG_WINDOWS)
        cvShowImage("1", thresh);
        cvWaitKey(0);
#endif //DEBUG_WINDOWS
        CvSeq* first = 0;
        std::vector<std::pair<float, int> > quads;
        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
        icvGetQuadrangleHypotheses(first, quads, 1);
        // ...and from the black-square image (inverted threshold)
        cvThreshold(black, thresh, thresh_level, 255, CV_THRESH_BINARY_INV);
#if defined(DEBUG_WINDOWS)
        cvShowImage("1", thresh);
        cvWaitKey(0);
#endif //DEBUG_WINDOWS
        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
        icvGetQuadrangleHypotheses(first, quads, 0);
        const size_t min_quads_count = size.width*size.height/2;
        std::sort(quads.begin(), quads.end(), less_pred);
        // now check if there are many hypotheses with similar sizes
        // do this by floodfill-style algorithm
        const float size_rel_dev = 0.4f;
        for(size_t i = 0; i < quads.size(); i++)
        {
            // find the run [i, j) of quads whose area stays within
            // size_rel_dev of quad i's area (quads are sorted by area)
            size_t j = i + 1;
            for(; j < quads.size(); j++)
            {
                if(quads[j].first/quads[i].first > 1.0f + size_rel_dev)
                {
                    break;
                }
            }
            if(j + 1 > min_quads_count + i)
            {
                // check the number of black and white squares
                std::vector<int> counts;
                countClasses(quads, i, j, counts);
                const int black_count = cvRound(ceil(size.width/2.0)*ceil(size.height/2.0));
                const int white_count = cvRound(floor(size.width/2.0)*floor(size.height/2.0));
                if(counts[0] < black_count*0.75 ||
                   counts[1] < white_count*0.75)
                {
                    continue;
                }
                result = 1;
                break;
            }
        }
    }
    //......... remainder of this function omitted in the listing .........
示例10: display
void display()
{
// capture from camera
IplImage* grabImage = cvRetrieveFrame(capture);
if(flip)
cvFlip(grabImage, grabImage);
cvResize(grabImage, resizeImage);
cvCvtColor(resizeImage, grayImage, CV_BGR2GRAY);
cvCopyImage(resizeImage, resultImage);
// update camera pose
tracker->UpdateCamerapose(grayImage);
tracker->DrawDebugInfo(resultImage);
tracker->DrawOutLine(resultImage, true);
tracker->GetCameraParameter()->DrawInfomation(resultImage, WIDTH/4);
int matchingCount = tracker->GetMatchingCount();
// adaptive threshold
int localcount = tracker->GetDetector()->GetKeypointsCount();
if(keypointCount != localcount) // if updated
{
if(localcount > FEATURE_COUNT)
threshold += 1;
if(localcount < FEATURE_COUNT)
threshold -= 1;
keypointCount = localcount;
tracker->GetDetector()->SetThreshold(threshold);
}
// calculate fps
fpsStep++;
if(fpsStep >= FPS_UPDATE_STEP)
{
fps = logging->calculateFPS()*(double)FPS_UPDATE_STEP;
logging->updateTickCount();
fpsStep = 0;
}
char message[100];
sprintf_s(message, "FPS : %.2lf", fps);
windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 20), 0.6, message);
sprintf_s(message, "Feature Count : %d, Threshold : %.0lf", keypointCount, threshold);
windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 40), 0.6, message);
sprintf_s(message, "Matching Count : %d", matchingCount);
windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 60), 0.6, message);
sprintf_s(message, "Press 'Space' to track the current image");
windage::Utils::DrawTextToImage(resultImage, cvPoint(WIDTH-270, HEIGHT-10), 0.5, message);
sprintf_s(message, "Press 'F' to flip image");
windage::Utils::DrawTextToImage(resultImage, cvPoint(WIDTH-270, HEIGHT-25), 0.5, message);
cvShowImage("tracking information window", resultImage);
// clear screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// draw virtual object
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
double radian = ANGLE * CV_PI / 180.0;
double dx = sin(radian) * VIRTUAL_CAMERA_DISTANCE;
double dy = cos(radian) * VIRTUAL_CAMERA_DISTANCE;
gluLookAt(dx, dy, 2000, 0.0, 0.0, 600.0, 0.0, 0.0, 1.0);
glPushMatrix();
{
// draw reference image & coordinate
renderer->DrawReference((double)WIDTH, (double)HEIGHT);
renderer->DrawAxis((double)WIDTH / 4.0);
// draw camera image & position
renderer->DrawCamera(tracker->GetCameraParameter(), resizeImage);
}
glPopMatrix();
glutSwapBuffers();
}
示例11: trace
// Calibrates a StereoVision instance from numbered chessboard image pairs
// ("<path><i>L.jpg" / "<path><i>R.jpg"), saves the calibration data, then
// rectifies the first pair and displays a rectification check (side-by-side
// with horizontal scan lines) plus the normalized depth map.
// image_path: directory/prefix of the calibration images.
// cornersX/cornersY: inner-corner counts of the chessboard pattern.
void MainWindow::stereoVisionTest(QString image_path,int cornersX,int cornersY){
    trace("stereoVisionTest ... ");
    StereoVision* sv = new StereoVision(CAM_WIDTH,CAM_HEIGHT);
    IplImage* images[2];
    //perform calibration based on sets of 2 images (chessboard)
    sv->calibrationStart(cornersX,cornersY);
    // READ IN THE LIST OF CHESSBOARDS:
    QString file_name;
    for(int i=0;;i++){
        for(int lr=0;lr<2;lr++){
            file_name = tr("%1%2%3.jpg").arg(image_path).arg(i).arg(lr ? 'L' : 'R');
            trace(file_name);
            images[lr] = cvLoadImage( file_name.toLatin1(), 0 );
        }
        if(images[0] && images[1]){
            trace(file_name);
            sv->calibrationAddSample(images[0],images[1]);
            for(int lr=0;lr<2;lr++)
                cvReleaseImage(&images[lr]);
        }else{
            // FIX: release a half-loaded pair before stopping (one image was
            // previously leaked when only L or only R existed)
            for(int lr=0;lr<2;lr++)
                if(images[lr])
                    cvReleaseImage(&images[lr]);
            break;
        }
    }
    sv->calibrationEnd();
    sv->calibrationSave("stereovisiontest.dat");
    //Once saved, calibration data can be loaded later with:
    //sv->calibrationLoad("stereovisiontest.dat");
    //test our calibration with the first image set:
    CvSize imageSize = sv->getImageSize();
    for(int lr=0;lr<2;lr++){
        QString file_name = tr("%1%2%3.jpg").arg(image_path).arg(0).arg(lr ? 'L' : 'R');
        images[lr] = cvLoadImage( file_name.toLatin1(), 0 );
    }
    // FIX: guard against missing test images (stereoProcess previously
    // dereferenced NULL when pair 0 could not be reloaded)
    if(!images[0] || !images[1]){
        trace("stereoVisionTest: could not reload the first image pair");
        for(int lr=0;lr<2;lr++)
            if(images[lr])
                cvReleaseImage(&images[lr]);
        delete sv;
        return;
    }
    //this method will compute sv->imagesRectified[0], sv->imagesRectified[1],
    //sv->imageDepth and sv->imageDepthNormalized
    sv->stereoProcess(images[0],images[1]);
    //merge the 2 rectified images into one image (pair) and draw horizontal
    //lines to evaluate the rectification visually
    CvMat* pair = cvCreateMat( imageSize.height, imageSize.width*2,CV_8UC3 );
    CvMat part;
    cvGetCols( pair, &part, 0, imageSize.width );
    cvCvtColor( sv->imagesRectified[0], &part, CV_GRAY2BGR );
    cvGetCols( pair, &part, imageSize.width,imageSize.width*2 );
    cvCvtColor( sv->imagesRectified[1], &part, CV_GRAY2BGR );
    for(int j = 0; j < imageSize.height; j += 16 )
        cvLine( pair, cvPoint(0,j),cvPoint(imageSize.width*2,j),CV_RGB(0,255,0));
    //display the results (HighGUI copies the data, so freeing below is safe)
    cvNamedWindow( "rectified", 1 );
    cvShowImage( "rectified", pair );
    cvNamedWindow( "depth", 1 );
    cvShowImage( "depth", sv->imageDepthNormalized);
    //free up memory
    cvReleaseImage(&images[0]);
    cvReleaseImage(&images[1]);
    cvReleaseMat(&pair);
    delete sv; // FIX: sv was allocated with new and never freed
}
示例12: main
/* Example 12: pyramidal Lucas-Kanade optical flow on a live camera feed.
 * Each iteration grabs two consecutive frames, finds Shi-Tomasi corners in
 * the first and draws the resulting motion vectors as arrows. ESC exits.
 * NOTE(review): cvQueryFrame results are never NULL-checked; the loop
 * consumes two frames per iteration, so flow is computed on alternating
 * pairs. 'allocateOnDemand', 'square' and 'pi' are defined elsewhere in
 * this file. Truncated in this listing. */
int main(void)
{
    /* video input from the camera */
    CvCapture* input_video;
    input_video = cvCaptureFromCAM(CV_CAP_ANY);
    /* check camera availability */
    if (input_video == NULL)
    {
        fprintf(stderr, "Error: Kamera tidak terdeteksi.\n");
        return -1;
    }
    /* grab an initial frame from the video */
    cvQueryFrame( input_video );
    /* query the video properties (frame dimensions) */
    CvSize frame_size;
    frame_size.height = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );
    /* create a new window named "Optical Flow" */
    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);
    long current_frame = 0;
    while(1)
    {
        static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL, *pyramid1 = NULL, *pyramid2 = NULL;
        /* get the next frame */
        frame = cvQueryFrame( input_video );
        /* allocate the working images (grayscale + colour copy of frame 1) */
        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame1_1C, 0);
        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame1, 0);
        /* get the second frame */
        frame = cvQueryFrame( input_video );
        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame2_1C, 0);
        /* Shi and Tomasi Feature Tracking! */
        /* allocate the eigenvalue/work images used by the corner detector */
        allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
        allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );
        /* Preparation: This array will contain the features found in frame 1. */
        CvPoint2D32f frame1_features[400];
        /* number of features / arrows to track (in: capacity, out: found) */
        int number_of_features;
        number_of_features = 400;
        /* run the Shi-Tomasi algorithm */
        cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, & number_of_features, .01, .01, NULL);
        /* Pyramidal Lucas Kanade Optical Flow! */
        /* arrays receiving the matched point locations (and status/error) in frame 2 */
        CvPoint2D32f frame2_features[400];
        char optical_flow_found_feature[400];
        float optical_flow_feature_error[400];
        CvSize optical_flow_window = cvSize(3,3);
        CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );
        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );
        /* run pyramidal Lucas-Kanade optical flow (5 pyramid levels) */
        cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, number_of_features, optical_flow_window, 5, optical_flow_found_feature, optical_flow_feature_error, optical_flow_termination_criteria, 0 );
        /* draw the arrows */
        for(int i = 0; i < number_of_features; i++)
        {
            /* skip features that were not found in frame 2 */
            if ( optical_flow_found_feature[i] == 0 ) continue;
            int line_thickness; line_thickness = 1;
            /* line colour */
            CvScalar line_color; line_color = CV_RGB(255,0,0);
            /* draw an arrow from p (frame 1) toward q (frame 2), length scaled 3x */
            CvPoint p,q;
            p.x = (int) frame1_features[i].x;
            p.y = (int) frame1_features[i].y;
            q.x = (int) frame2_features[i].x;
            q.y = (int) frame2_features[i].y;
            double angle; angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
            double hypotenuse; hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );
            q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
            q.y = (int) (p.y - 3 * hypotenuse * sin(angle));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
            /* arrow head: two short strokes at +/- 45 degrees from the shaft */
            p.x = (int) (q.x + 9 * cos(angle + pi / 4));
            p.y = (int) (q.y + 9 * sin(angle + pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
            p.x = (int) (q.x + 9 * cos(angle - pi / 4));
            p.y = (int) (q.y + 9 * sin(angle - pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
        }
        /* display the annotated frame */
        cvShowImage("Optical Flow", frame1);
        /* exit on ESC */
        int key_pressed;
        key_pressed = cvWaitKey(10);
        if (key_pressed == 27) break;
        //......... remainder of this example omitted in the listing .........
示例13: cvCloneImage
void ConformalResizing::ShowConstrains(const IplImage *pBackGround,
const vector<ConstrainUnits>& quads,
const vector<ConstrainUnits>& qaud5s,
const vector<ConstrainUnits>& edges,
const char *winName /* = */,
const int waite /* = 0 */,
const char* saveName /* = NULL */)
{
IplImage* pMixedImg = cvCloneImage(pBackGround);
cvNamedWindow(winName);
// Show quads
for (size_t i = 0; i < quads.size(); i++)
{
CvPoint pnts[4];
for (int j = 0; j < 4; j++)
{
pnts[j].x = (int)(quads[i].pnts[j].x);
pnts[j].y = (int)(quads[i].pnts[j].y);
}
cvLineAA(pMixedImg, pnts[0], pnts[1], 255);
cvLineAA(pMixedImg, pnts[0], pnts[2], 255);
cvLineAA(pMixedImg, pnts[3], pnts[1], 255);
cvLineAA(pMixedImg, pnts[3], pnts[2], 255);
}
// Show qaud5s
for (size_t i = 0; i < qaud5s.size(); i++)
{
CvPoint pnts[5];
for (int j = 0; j < 5; j++)
{
pnts[j].x = (int)(qaud5s[i].pnts[j].x);
pnts[j].y = (int)(qaud5s[i].pnts[j].y);
}
cvLineAA(pMixedImg, pnts[0], pnts[1], 255);
cvLineAA(pMixedImg, pnts[0], pnts[2], 255);
cvLineAA(pMixedImg, pnts[3], pnts[1], 255);
cvLineAA(pMixedImg, pnts[3], pnts[2], 255);
cvLineAA(pMixedImg, pnts[0], pnts[4], 128);
cvLineAA(pMixedImg, pnts[1], pnts[4], 128);
cvLineAA(pMixedImg, pnts[2], pnts[4], 128);
cvLineAA(pMixedImg, pnts[3], pnts[4], 128);
}
// Show edges
for (size_t i = 0; i < edges.size(); i++)
{
CvScalar color;
if(ispg)
color=GenColor1(i);
else
color=GenColor(i);
//swap(color.val[0],color.val[2]);
for (int j = 0; j < edges[i].n; j++)
{
CvPoint point = cvPoint((int)(edges[i].pnts[j].x + 0.5), (int)(edges[i].pnts[j].y + 0.5));
//cvCircle(pMixedImg, point, 3, CmShow::gColors[i % CM_SHOW_COLOR_NUM], 2);
cvCircle(pMixedImg, point, 3, color, 2);
//cvCircle(pMixedImg, point, 3, CmShow::gColors[edges[i].ind[j] % CM_SHOW_COLOR_NUM], 2);
}
}
cvNamedWindow(winName);
cvShowImage(winName, pMixedImg);
if (saveName != NULL)
cvSaveImage(saveName, pMixedImg);
cvReleaseImage(&pMixedImg);
}
示例14: cvLoadImage
// Builds the constraint units for the conformal-resizing optimization over a
// szGrid mesh of meshQuadSize-pixel quads laid on srcImg32F.
// Outputs (by reference): quads, qaud5s, edges — constraint sets; ppos — the
// initial (x, y) position of every grid node, interleaved as x0,y0,x1,y1,...
// NOTE(review): this function is truncated in this view ("部分代码省略");
// the return value and the remainder of the edge/grid bookkeeping are not
// visible, so comments below cover only the visible portion.
CvSize ConformalResizing::GetConstrainUnits(const IplImage* srcImg32F,
const IplImage* img8U3C,
const CvSize szGrid,
vector<ConstrainUnits>& quads,
vector<ConstrainUnits>& qaud5s,
vector<ConstrainUnits>& edges,
vector<double>& ppos,/*added 2009.08.16*/
int meshQuadSize)
{
// Get importance map: prefer a precomputed map on disk if a name is set.
//IplImage* impImg32F = cvCreateImage(cvGetSize(srcImg32F), IPL_DEPTH_32F, 1);
//cvScale(srcImg32F, impImg32F);
IplImage* impImg32F = NULL;
if (strlen(FileNames::impName) > 0)
{
IplImage* impMap = cvLoadImage(FileNames::impName, CV_LOAD_IMAGE_GRAYSCALE);
//if (impMap != NULL)
{
// Resize/normalize the map to the mesh, scale 8U [0,255] -> 32F [0,1],
// then add the configured floor weight so no region has zero importance.
// NOTE(review): if the load fails impMap is NULL here (the NULL check is
// commented out) — NormalizeImg would receive NULL; confirm upstream
// guarantees the file exists.
NormalizeImg(impMap, meshQuadSize);
impImg32F = cvCreateImage(cvGetSize(impMap), IPL_DEPTH_32F, 1);
cvScale(impMap, impImg32F, 1/255.0);
cvReleaseImage(&impMap);
cvNamedWindow("Importance");
cvShowImage("Importance", impImg32F);
cvAddS(impImg32F, cvScalarAll(gSet("minWeight")), impImg32F);
}
}
// Fall back to computing an energy/importance map from the color image
// using the configured per-cue weights.
CmImportance imp;
if (impImg32F == NULL)
{
double weights[5];
weights[0] = gSet("edgeWeight");
weights[1] = gSet("faceWeight");
weights[2] = gSet("motionWeight");
weights[3] = gSet("contrastWeight");
weights[4] = gSet("minWeight");
impImg32F = imp.calcEnergy(img8U3C, weights);
imp.showEnergy();
}
// Convert the importance map back to 8U (the save itself is disabled).
{
IplImage* impSave = cvCreateImage(cvGetSize(impImg32F), IPL_DEPTH_8U, 1);
cvScale(impImg32F, impSave, 255);
//cvSaveImage(FileNames::outImp, impSave);
cvReleaseImage(&impSave);
}
//#ifdef _DEBUG
// cvSave("impd.xml", img8U3C, "impImg32F");
//#else
// cvSave("imp.xml", img8U3C, "impImg32F");
//#endif // _DEBUG
//
// Build the regular mesh over the image and the per-grid bookkeeping arrays.
IplImage *pGridNodeX64F, *pGridNodeY64F;
CmCvHelper::MeshGrid(pGridNodeX64F, pGridNodeY64F, 0, srcImg32F->width, 0, srcImg32F->height, meshQuadSize, meshQuadSize);
double (*pGridPos)[2] = new double[szGrid.width * szGrid.height][2]; //Original edge point position within each grid. (x, y)
int *pGridIdx = new int[szGrid.width * szGrid.height]; // Index of grid point variable
int *pGridIdxE = new int[szGrid.width * szGrid.height]; // Index of edge contain this grid point
typedef vector<pair<int, int>> EdgePos; // Position of edge point in grid
vector<EdgePos> edgePntPos;
// One variable per mesh node: (h+1) x (w+1) nodes.
int varaInd = (szGrid.height + 1) * (szGrid.width + 1);
/*added 2009.08.16*/
// Record the initial position of every grid node, interleaved x,y.
{
ppos.reserve(varaInd*2);
for(int y=0;y<=szGrid.height;y++)
for(int x=0;x<=szGrid.width;x++)
{
ppos.push_back(x*meshQuadSize);//x
ppos.push_back(y*meshQuadSize);//y
}
}
// Detect image edges and scan the edge-index map for edge pixels.
{
//Get Edges
const IplImage* pLineInd;
vector<CEdge> edge;
CDetectEdge detEdge(edge, gSet("Sigma"));
detEdge.Initial(srcImg32F);
detEdge.CalFirDer();
detEdge.NoneMaximalSuppress((float)gSet("LinkEndBound"), (float)gSet("LinkStartBound"));
// NOTE(review): subscript syntax here vs. call syntax gSet(...) elsewhere —
// confirm gSet exposes both operator[] and operator().
detEdge.Link(gSet["ShortRemoveBound"]);
pLineInd = detEdge.LineIdx();
// Mark all grid slots as "no edge yet" (0xff bytes == -1 for ints).
int* pTmp = pGridIdx; // Borrow memory inside
memset(pTmp, 0xff, szGrid.width * szGrid.height * sizeof(int));
memset(pGridIdxE, 0xff, szGrid.width * szGrid.height * sizeof(int));
for (int y = 0; y < srcImg32F->height; y++)
{
int* lineIdx = (int*)(pLineInd->imageData + pLineInd->widthStep * y);
for (int x = 0; x < srcImg32F->width; x++)
{
if (lineIdx[x] > 0) // it's an edge point
{
// Distance of this edge pixel from the nearest vertical grid line.
int dx = x % meshQuadSize;
dx = min(dx, meshQuadSize - dx);
//.........这里部分代码省略.........
示例15: cvShowManyImages
//.........这里部分代码省略.........
// Remainder of cvShowManyImages: tiles up to 12 IplImages into one window.
// NOTE(review): the function signature and the declarations of w, h, size,
// i, m, n, x, y, scale, img, DispImage, title and nArgs are in the omitted
// part above; presumably it is a variadic (title, nArgs, ...) function —
// confirm against the full source.
int max;
// If the number of arguments is lesser than 0 or greater than 12
// return without displaying
if(nArgs <= 0) {
printf("Number of arguments too small....\n");
return;
}
else if(nArgs > 12) {
printf("Number of arguments too large....\n");
return;
}
// Determine the size of the image,
// and the number of rows/cols
// from number of arguments:
// 1 image -> 1x1 cell of 300px, 2 -> 2x1, 3-4 -> 2x2, 5-6 -> 3x2 (200px),
// 7-8 -> 4x2 (200px), 9-12 -> 4x3 (150px).
else if (nArgs == 1) {
w = h = 1;
size = 300;
}
else if (nArgs == 2) {
w = 2; h = 1;
size = 300;
}
else if (nArgs == 3 || nArgs == 4) {
w = 2; h = 2;
size = 300;
}
else if (nArgs == 5 || nArgs == 6) {
w = 3; h = 2;
size = 200;
}
else if (nArgs == 7 || nArgs == 8) {
w = 4; h = 2;
size = 200;
}
else {
w = 4; h = 3;
size = 150;
}
// Create a new 3 channel canvas; 100/60 extra pixels provide the 20px
// margins around/between the w x h grid of size-pixel cells.
DispImage = cvCreateImage( cvSize(100 + size*w, 60 + size*h), 8, 3 );
// Used to get the arguments passed
va_list args;
va_start(args, nArgs);
// Loop for nArgs number of arguments; (m, n) is the top-left corner of the
// current cell, advanced by cell width + 20px margin each iteration.
for (i = 0, m = 20, n = 20; i < nArgs; i++, m += (20 + size)) {
// Get the Pointer to the IplImage
img = va_arg(args, IplImage*);
// Check whether it is NULL or not
// If it is NULL, release the image, and return
if(img == 0) {
printf("Invalid arguments");
cvReleaseImage(&DispImage);
return;
}
// Find the width and height of the image
x = img->width;
y = img->height;
// Find whether height or width is greater in order to resize the image
max = (x > y)? x: y;
// Find the scaling factor to resize the image so its larger side fits
// exactly in a size-pixel cell (aspect ratio preserved)
scale = (float) ( (float) max / size );
// Used to Align the images: wrap to the next row after w cells
if( i % w == 0 && m!= 20) {
m = 20;
n+= 20 + size;
}
// Set the image ROI to display the current image
cvSetImageROI(DispImage, cvRect(m, n, (int)( x/scale ), (int)( y/scale )));
// Resize the input image and copy the it to the Single Big Image
cvResize(img, DispImage);
// Reset the ROI in order to display the next image
cvResetImageROI(DispImage);
}
// Create a new window, and show the Single Big Image; blocks until a key
// is pressed, then destroys the window
cvNamedWindow( title, 1 );
cvShowImage( title, DispImage);
cvWaitKey();
cvDestroyWindow(title);
// End the number of arguments
va_end(args);
// Release the Image Memory
cvReleaseImage(&DispImage);
}