This article collects and organizes typical usage examples of the C++ VideoCapture::set method. If you have been wondering how exactly to use VideoCapture::set in C++, how it works, or what real-world calls look like, the curated examples here may help. You can also explore further usage examples of the VideoCapture class that this method belongs to.
In the following, 15 code examples of the VideoCapture::set method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
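Before the examples, here is a minimal self-contained sketch of the basic call pattern. The device index, requested size, and messages are illustrative assumptions rather than code from any example below; the point is that VideoCapture::set returns false when the backend rejects a property, so the result is worth checking:
#include <opencv2/opencv.hpp>
#include <iostream>
int main()
{
    cv::VideoCapture cap(0);  // hypothetical default webcam
    if (!cap.isOpened())
        return -1;
    // Request a capture size; drivers may silently substitute the nearest supported mode.
    bool okW = cap.set(cv::CAP_PROP_FRAME_WIDTH, 640);
    bool okH = cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);
    if (!okW || !okH)
        std::cout << "resolution request rejected by the backend" << std::endl;
    // Read back what the device actually delivers.
    std::cout << "capturing at " << cap.get(cv::CAP_PROP_FRAME_WIDTH) << "x"
              << cap.get(cv::CAP_PROP_FRAME_HEIGHT) << std::endl;
    return 0;
}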
Example 1: main
int main(int argc, char** argv)
{
int flag_use_image = 0;
if( argc != 2 )
{
std::cout<< "Usage: ./init num" << std::endl;
std::cout<< "num: 0 - image" << std::endl
<< " 1 - video" << std::endl
<< " 2 - dataset" << std::endl;
return -1;
}
else
{
std::string val = argv[1];
if(val == "0")
{
}
else if(val == "1")
{
flag_use_image = 1;
}
else if(val == "2")
{
flag_use_image = 2;
}
else
{
std::cout<< "Invalid num argument." << std::endl;
return -1;
}
}
std::string winName = "Image";
namedWindow(winName, WINDOW_NORMAL);
mat_canvas = imread( "data/book.jpg");
if(mat_canvas.empty())
{
std::cout<< "Could not open image data/book.jpg." << std::endl;
return -1;
}
if(flag_use_image == 0)
{
setMouseCallback(winName, mouseEvent);
// // write mat to file
// std::string fileName = "mat_descriptors.yml";
// FileStorage fs(fileName, FileStorage::WRITE);
// fs << "descriptors" << mat_descriptors;
// fs.release();
// std::cout<< fileName << " is generated." << std::endl;
// Mat copy;
// FileStorage fs2("mat_descriptors.yml", FileStorage::READ);
// fs2["descriptors"] >> copy;
// fs2.release();
// FileStorage fs3("test.yml", FileStorage::WRITE);
// fs3 << "descriptors" << copy;
// fs3.release();
//////////////////////////////////////////////////////////
// std::vector<cv::Point3f> vec_pois;
// vec_pois.push_back(Point3f(0, 0, 0));
// vec_pois.push_back(Point3f(1.1, 0.1, 0));
// vec_pois.push_back(Point3f(0.3, 2.1, 0));
// vec_pois.push_back(Point3f(7.3, 2, 0));
// vec_pois.push_back(Point3f(1.3, 4.1, 0));
// FileStorage fs3("POIs.yml", FileStorage::WRITE);
// fs3 << "POIs" << vec_pois;
// fs3.release();
//////////////////////////////////////////////////////////
while(1)
{
imshow(winName, mat_canvas );
waitKey(30);
}
}
//-- use dataset
else if(flag_use_image == 2)
{
//.........part of the code omitted here.........
Example 2: assessment
static AssessmentRes assessment(char* video,char* gt_str, char* algorithms[],char* initBoxes_str[],int algnum){
char buf[200];
int start_frame=0;
int linecount=0;
Rect2d boundingBox;
vector<double> averageMillisPerFrame(algnum,0.0);
static int videoNum=0;
videoNum++;
FILE* gt=fopen(gt_str,"r");
if(gt==NULL){
printf("cannot open the ground truth file %s\n",gt_str);
exit(EXIT_FAILURE);
}
for(linecount=0;fgets(buf,sizeof(buf),gt)!=NULL;linecount++);
if(linecount==0){
printf("ground truth file %s has no lines\n",gt_str);
exit(EXIT_FAILURE);
}
fseek(gt,0,SEEK_SET);
if(fgets(buf,sizeof(buf),gt)==NULL){
printf("ground truth file %s has no lines\n",gt_str);
exit(EXIT_FAILURE);
}
std::vector<Rect2d> initBoxes(algnum);
for(int i=0;i<algnum;i++){
printf("%s %s\n",algorithms[i],initBoxes_str[CMDLINEMAX*i]);
if(lineToRect(initBoxes_str[CMDLINEMAX*i],boundingBox)<0){
printf("please, specify bounding box for video %s, algorithm %s\n",video,algorithms[i]);
printf("FYI, initial bounding box in ground truth is %s\n",buf);
if(gt!=NULL){
fclose(gt);
}
exit(EXIT_FAILURE);
}else{
initBoxes[i].x=boundingBox.x;
initBoxes[i].y=boundingBox.y;
initBoxes[i].width=boundingBox.width;
initBoxes[i].height=boundingBox.height;
}
}
VideoCapture cap;
cap.open( String(video) );
cap.set( CAP_PROP_POS_FRAMES, start_frame );
if( !cap.isOpened() ){
printf("cannot open video %s\n",video);
help();
exit(EXIT_FAILURE);
}
Mat frame;
namedWindow( "Tracking API", 1 );
std::vector<Ptr<Tracker> >trackers(algnum);
for(int i=0;i<algnum;i++){
trackers[i] = Tracker::create( algorithms[i] );
if( trackers[i] == NULL ){
printf("error in the instantiation of the tracker %s\n",algorithms[i]);
if(gt!=NULL){
fclose(gt);
}
exit(EXIT_FAILURE);
}
}
cap >> frame;
frame.copyTo( image );
if(lineToRect(buf,boundingBox)<0){
if(gt!=NULL){
fclose(gt);
}
exit(EXIT_FAILURE);
}
rectangle( image, boundingBox,palette[0], 2, 1 );
for(int i=0;i<(int)trackers.size();i++){
rectangle(image,initBoxes[i],palette[i+1], 2, 1 );
if( !trackers[i]->init( frame, initBoxes[i] ) ){
printf("could not initialize tracker %s with box %s at video %s\n",algorithms[i],initBoxes_str[i],video);
if(gt!=NULL){
fclose(gt);
}
exit(EXIT_FAILURE);
}
}
imshow( "Tracking API", image );
int frameCounter = 0;
AssessmentRes res((int)trackers.size());
for ( ;; ){
if( !paused ){
cap >> frame;
if(frame.empty()){
break;
}
frame.copyTo( image );
if(fgets(buf,sizeof(buf),gt)==NULL){
//.........part of the code omitted here.........
Example 3: main
/*
* To work with Kinect or XtionPRO the user must install the OpenNI library and the PrimeSense
* module for OpenNI, and configure OpenCV with the WITH_OPENNI flag ON (using CMake).
*/
int main( int argc, char* argv[] )
{
time_t start = time(0);
bool isColorizeDisp, isFixedMaxDisp;
int imageMode;
bool retrievedImageFlags[5];
string filename;
bool isVideoReading;
//parseCommandLine( argc, argv, isColorizeDisp, isFixedMaxDisp, imageMode, retrievedImageFlags, filename, isVideoReading );
if (pcl::io::loadPCDFile<pcl::PointXYZ> ("test_pcd.pcd", *cloud_golden) == -1) //* load the file
{
PCL_ERROR ("Couldn't read file test_pcd.pcd \n");
return (-1);
}
std::cout << "Loaded "
<< cloud_golden->width * cloud_golden->height
<< " data points from test_pcd.pcd with the following fields: "
<< std::endl;
//
pcl::copyPointCloud (*cloud_golden, *cloud_transformed);
cout << "Device opening ..." << endl;
cout << CV_CAP_OPENNI <<endl;
VideoCapture capture;
if( isVideoReading )
capture.open( filename );
else
capture.open(CV_CAP_OPENNI);
cout << "done." << endl;
if( !capture.isOpened() )
{
cout << "Can not open a capture object." << endl;
return -1;
}
if( !isVideoReading )
{
bool modeRes=false;
switch ( imageMode )
{
case 0:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ );
break;
case 1:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_15HZ );
break;
case 2:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_30HZ );
break;
//The following modes are only supported by the Xtion Pro Live
case 3:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_30HZ );
break;
case 4:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_60HZ );
break;
default:
CV_Error( CV_StsBadArg, "Unsupported image mode property.\n");
}
if (!modeRes)
cout << "\nThis image mode is not supported by the device, the default value (CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl;
}
if(capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0) capture.set(CV_CAP_PROP_OPENNI_REGISTRATION,1);
// Print some available device settings.
cout << "\nDepth generator output mode:" << endl <<
"FRAME_WIDTH " << capture.get( CV_CAP_PROP_FRAME_WIDTH ) << endl <<
"FRAME_HEIGHT " << capture.get( CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
"FRAME_MAX_DEPTH " << capture.get( CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
"FPS " << capture.get( CV_CAP_PROP_FPS ) << endl <<
"REGISTRATION " << capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) << endl;
if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
{
cout <<
"\nImage generator output mode:" << endl <<
"FRAME_WIDTH " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) << endl <<
"FRAME_HEIGHT " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
"FPS " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl;
}
else
{
cout << "\nDevice doesn't contain image generator." << endl;
if (!retrievedImageFlags[0] && !retrievedImageFlags[1] && !retrievedImageFlags[2])
return 0;
}
if( !face_cascade.load( cascade_name[0] ) )
{
printf("--(!)Error loading\n"); return -1;
};
if( !eyes_cascade.load( cascade_name[1] ) )
{
printf("--(!)Error loading\n"); return -1;
};
//printf("Entering for\n");
//.........part of the code omitted here.........
Example 4: main
int main(int argc, const char **argv)
{
VideoCapture cap;
Tracker objTracker;
CommandLineParser parser(argc, argv, keys);
if (parser.has("help")) {
help();
return 0;
}
cap.open(argv[1]);
if (!cap.isOpened()) {
help();
cout << "***Could not access file...***\n";
return -1;
}
Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH), //Acquire input size
(int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));
cout << hot_keys;
bool paused = false;
Mat frame;
cap >> frame;
objTracker.Init(S, Tracker::InitParams());
int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
VideoWriter outputVideo;
// outputVideo.open("output.mp4" , ex, cap.get(CV_CAP_PROP_FPS), S, true);
Mat out;
try {
while (1) {
if (!paused && Tracker::g_initTracking) {
cap >> frame;
if (frame.empty())
break;
}
if (!paused) {
objTracker.ProcessFrame(frame, out);
}
imshow("CamShift", out);
// outputVideo << out;
char c = (char)waitKey(10);
if (c == 27)
break;
switch (c) {
case 'b':
objTracker.ToggleShowBackproject();
break;
case 'c':
// trackObject = 0;
// histimg = Scalar::all(0);
break;
case 'h':
objTracker.HideControlsGUI();
// showHist = !showHist;
// if (!showHist)
// destroyWindow("Histogram");
// else
// namedWindow("Histogram", 1);
break;
case 'p':
paused = !paused;
break;
case 'r':
cap.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
// outputVideo.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
cap >> frame;
objTracker.Init(S, Tracker::InitParams());
break;
default:
;
}
}
}
catch (const cv::Exception &e) {
std::cerr << e.what();
cap.release();
outputVideo.release();
return 1;
}
cap.release();
outputVideo.release();
return 0;
}
Example 5: main
int main(int argc, char* argv[])
{
if (argc < 4)
{
cout << "Usage: " << argv[0] << " <num_boards> <board_w> <board_h>" << endl;
return -1;
}
int numBoards = atoi(argv[1]);
int board_w = atoi(argv[2]);
int board_h = atoi(argv[3]);
Size board_sz = Size(board_w, board_h);
int board_n = board_w*board_h;
vector<vector<Point3f> > object_points;
vector<vector<Point2f> > image_points;
vector<Point2f> corners;
vector<Point3f> obj;
for (int j=0; j<board_n; j++)
{
obj.push_back(Point3f(j/board_h, j%board_h, 0.0f));
}
Mat img, gray;
cout << "Device opening ..." << endl;
VideoCapture capture;
capture.open( CV_CAP_OPENNI );
//registration
if(capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0)
{
capture.set(CV_CAP_PROP_OPENNI_REGISTRATION, 1);
cout << "\nImages have been registered ..." << endl;
}
//cout << cv::getBuildInformation() << endl;
if( !capture.isOpened() )
{
cout << "Can not open a capture object." << endl;
return -1;
}
if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
{
cout <<
"\nImage generator output mode:" << endl <<
"FRAME_WIDTH " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) <<
" | FRAME_HEIGHT " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) <<
" | FPS " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl;
}
else
{
cout << "\nDevice doesn't contain image generator." << endl;
}
int success = 0;
int k = 0;
bool found = false;
while (success < numBoards)
{
if( !capture.grab() )
{
cout << "Can not grab images." << endl;
return -1;
}
//capture.retrieve( depth, CV_CAP_OPENNI_DEPTH_MAP );
capture.retrieve( img, CV_CAP_OPENNI_BGR_IMAGE );
cvtColor(img, gray, CV_BGR2GRAY);
found = findChessboardCorners(gray, board_sz, corners, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
if (found)
{
cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(gray, board_sz, corners, found);
}
// imshow("image", img);
imshow("corners", gray);
k = waitKey(10);
if (found)
{
k = waitKey(0);
}
if (k == 27)
{
break;
}
if (k == ' ' && found !=0)
{
image_points.push_back(corners);
object_points.push_back(obj);
printf ("Corners stored\n");
success++;
if (success >= numBoards)
{
break;
}
}
//.........part of the code omitted here.........
Example 6: main
int main(int argc, char* argv[])
{
/* SERIAL CONFIGURATION */
struct termios tio;
struct termios stdio;
struct termios old_stdio;
int tty_usb;
char buffer [33];
/*
* CONFIGURE USB PORT
*/
configureOximeter(&stdio, &tio, &old_stdio);
/*
* OPENING THE USB PORT FOR I/O COMMUNICATION
*/
openUSB(&tty_usb, &tio);
/*
* READING FROM AND WRITING TO THE USB PORT
*/
/* END OF SERIAL CONFIGURATION */
//some boolean variables for different functionality within this
//program
bool trackObjects = true;
bool useMorphOps = true;
//Matrix to store each frame of the webcam feed
Mat cameraFeed;
//matrix storage for HSV image
Mat HSV;
//matrix storage for binary threshold image
Mat threshold;
//x and y values for the location of the object
int x=0, y=0;
//create slider bars for HSV filtering
createTrackbars();
//video capture object to acquire webcam feed
VideoCapture capture;
//open capture object at location zero (default location for webcam)
capture.open(0);
//set height and width of capture frame
capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
//start an infinite loop where webcam feed is copied to cameraFeed matrix
//all of our operations will be performed within this loop
while(1){
//store image to matrix
capture.read(cameraFeed);
//convert frame from BGR to HSV colorspace
cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
//filter HSV image between values and store filtered image to
//threshold matrix
inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
//perform morphological operations on thresholded image to eliminate noise
//and emphasize the filtered object(s)
if(useMorphOps)
morphOps(threshold);
//pass in thresholded frame to our object tracking function
//this function will return the x and y coordinates of the
//filtered object
if(trackObjects)
{
trackFilteredObject(x,y,threshold,cameraFeed);
write(tty_usb, &y, sizeof(y)); //write y to the serial port
//read(tty_usb, &leitura, 4); //read from the serial port
}
//show the frames
imshow(windowName2,threshold);
imshow(windowName,cameraFeed);
imshow(windowName1,HSV);
//delay 200ms so that the screen can refresh.
//without this waitKey() call no image will appear!
waitKey(200);
}
return 0;
}
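configureOximeter() and openUSB() above are project helpers whose bodies are not shown. As a hedged sketch (the device path, baud rate, and flags below are assumptions, not the project's actual settings), opening a serial device with termios typically looks like this:
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>
#include <cstdio>
// Open a serial device in raw mode at an assumed 9600 baud; returns the fd or -1.
static int openSerial(const char* path)
{
    int fd = open(path, O_RDWR | O_NOCTTY);
    if (fd < 0) { perror("open"); return -1; }
    struct termios tio;
    if (tcgetattr(fd, &tio) < 0) { close(fd); return -1; }
    cfmakeraw(&tio);               // raw mode: no echo, no line buffering
    cfsetispeed(&tio, B9600);      // assumed baud rate
    cfsetospeed(&tio, B9600);
    tio.c_cflag |= CLOCAL | CREAD; // ignore modem control lines, enable receiver
    if (tcsetattr(fd, TCSANOW, &tio) < 0) { close(fd); return -1; }
    return fd;
}
// usage (hypothetical device path): int tty_usb = openSerial("/dev/ttyUSB0");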
Example 7: main
int main( int argc, char** argv )
{
VideoCapture cap;
help();
if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
cap.open(argc == 2 ? argv[1][0] - '0' : 0);
else if( argc >= 2 )
{
cap.open(argv[1]);
if( cap.isOpened() )
cout << "Video " << argv[1] <<
": width=" << cap.get(CV_CAP_PROP_FRAME_WIDTH) <<
", height=" << cap.get(CV_CAP_PROP_FRAME_HEIGHT) <<
", nframes=" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl;
if( argc > 2 && isdigit(argv[2][0]) )
{
int pos;
sscanf(argv[2], "%d", &pos);
cout << "seeking to frame #" << pos << endl;
cap.set(CV_CAP_PROP_POS_FRAMES, pos);
}
}
if( !cap.isOpened() )
{
cout << "Could not initialize capturing...\n";
return -1;
}
namedWindow( "Laplacian", 0 );
createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );
Mat smoothed, laplace, result;
for(;;)
{
Mat frame;
cap >> frame;
if( frame.empty() )
break;
int ksize = (sigma*5)|1;
if(smoothType == CV_GAUSSIAN)
GaussianBlur(frame, smoothed, Size(ksize, ksize), sigma, sigma);
else if(smoothType == CV_BLUR)
blur(frame, smoothed, Size(ksize, ksize));
else
medianBlur(frame, smoothed, ksize);
Laplacian(smoothed, laplace, CV_16S, 5);
convertScaleAbs(laplace, result, (sigma+1)*0.25);
imshow("Laplacian", result);
int c = waitKey(30);
if( c == ' ' )
smoothType = smoothType == CV_GAUSSIAN ? CV_BLUR : smoothType == CV_BLUR ? CV_MEDIAN : CV_GAUSSIAN;
if( c == 'q' || c == 'Q' || (c & 255) == 27 )
break;
}
return 0;
}
Example 8: main
int main( int argc, char** argv )
{
help();
VideoCapture capture;
SerializeHelper sHelp = SerializeHelper();
Forest forest = sHelp.loadForest("adult.txt");
// Open webcam
capture.open(CV_CAP_INTELPERC);
//cap.open("d2.avi");
if( !capture.isOpened() )
{
cout << "Could not initialize capturing...\n";
return -1;
}
capture.set(CV_CAP_INTELPERC_IMAGE_GENERATOR | CV_CAP_PROP_INTELPERC_PROFILE_IDX, 0);
capture.set(CV_CAP_INTELPERC_DEPTH_GENERATOR | CV_CAP_PROP_INTELPERC_PROFILE_IDX, 0);
namedWindow( "Depth", 1 );
namedWindow( "Color", 1 );
Mat gray, prevGray, image;
vector<Point2f> points[2];
//rect =
ImagePacket images = getFrames(capture);
Mat threshDepth;
int threshDist = 750;
threshold(images.getDepth(), threshDepth, threshDist, 100000, THRESH_TOZERO_INV);
threshDepth.convertTo(threshDepth, CV_8U);
Mat segmentedImage = forest.classifyImage(threshDepth);
imshow("Segmentation", convertToColorForBaby(segmentedImage));
Rect rect = isolateBodyPart(segmentedImage, HEAD);
TemplateTracker hTrack = TemplateTracker();
waitKey(10);
hTrack.initialize(rect, images.getColor(), threshDepth, 0);
Mat color, uvMap;
for(;;)
{
ImagePacket images = getFrames(capture);
threshold(images.getDepth(), threshDepth, threshDist, 100000, THRESH_TOZERO_INV);
threshDepth.convertTo(threshDepth, CV_8U);
hTrack.track(images.getColor(), threshDepth);
forehead = getForeheadFromHead(hTrack.getTrackedRegion());
color = images.getColor();
uvMap = images.getUVMap();
Mat foreheadDepth = threshDepth(forehead);
imshow("forehead", foreheadDepth);
transpose(threshDepth, threshDepth);
transpose(color, color);
transpose(foreheadDepth, foreheadDepth);
for(int i = 0; i < foreheadDepth.rows; i++) {
for(int j = 0; j < foreheadDepth.cols; j++) {
if(foreheadDepth.at<uchar>(i,j) != 0) {
Point cPoint = translateDepthToColor(Point(j+forehead.y, i+forehead.x), color, uvMap);
if(cPoint.x < color.cols && cPoint.y < color.rows)
circle( color, cPoint, 3, Scalar(0,255,0), -1, 8);
}
}
}
transpose(threshDepth, threshDepth);
transpose(color, color);
rectangle(threshDepth, hTrack.getTrackedRegion(), Scalar(255, 0, 0), 2, 8, 0);
rectangle(threshDepth, forehead, Scalar(255, 0, 0), 2, 8, 0);
//.........part of the code omitted here.........
Example 9: main
int main(int argc, char* argv[])
{
//some boolean variables for different functionality within this
//program
bool trackObjects = false;
bool useMorphOps = false;
//Matrix to store each frame of the webcam feed
Mat cameraFeed;
//matrix storage for HSV image
Mat HSV;
//matrix storage for binary threshold image
Mat threshold;
//x and y values for the location of the object
int x=0, y=0;
//create slider bars for HSV filtering
createTrackbars();
//video capture object to acquire webcam feed
VideoCapture capture;
//open capture object at location zero (default location for webcam)
capture.open(0);
//set height and width of capture frame
capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
//start an infinite loop where webcam feed is copied to cameraFeed matrix
//all of our operations will be performed within this loop
while(1){
//store image to matrix
capture.read(cameraFeed);
//convert frame from BGR to HSV colorspace
cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
//filter HSV image between values and store filtered image to
//threshold matrix
inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
//perform morphological operations on thresholded image to eliminate noise
//and emphasize the filtered object(s)
if(useMorphOps)
morphOps(threshold);
//pass in thresholded frame to our object tracking function
//this function will return the x and y coordinates of the
//filtered object
if(trackObjects)
trackFilteredObject(x,y,threshold,cameraFeed);
//show frames
imshow(windowName2,threshold);
imshow(windowName,cameraFeed);
imshow(windowName1,HSV);
//delay 30ms so that screen can refresh.
//image will not appear without this waitKey() command
waitKey(30);
}
return 0;
}
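Examples 6 and 9 request FRAME_WIDTH/FRAME_HEIGHT but never verify the outcome, and drivers are free to round or ignore such requests. Below is a hedged sketch of a read-back check; the helper name and the reporting are illustrative, using the same legacy CV_CAP_PROP_* constants as the examples:
#include <opencv2/opencv.hpp>
#include <iostream>
// Request a capture resolution, then compare against what the backend reports;
// set() can return true even when the driver substitutes another mode.
static bool requestSize(cv::VideoCapture& cap, double w, double h)
{
    cap.set(CV_CAP_PROP_FRAME_WIDTH, w);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, h);
    double gotW = cap.get(CV_CAP_PROP_FRAME_WIDTH);
    double gotH = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
    if (gotW != w || gotH != h)
        std::cout << "requested " << w << "x" << h
                  << ", device delivers " << gotW << "x" << gotH << std::endl;
    return gotW == w && gotH == h;
}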
Example 10: main
int main( int argc, char** argv ){
CommandLineParser parser( argc, argv, keys );
String tracker_algorithm = parser.get<String>( 0 );
String video_name = parser.get<String>( 1 );
int start_frame = parser.get<int>( 2 );
if( tracker_algorithm.empty() || video_name.empty() )
{
help();
return -1;
}
int coords[4]={0,0,0,0};
bool initBoxWasGivenInCommandLine=false;
{
String initBoundingBox=parser.get<String>(3);
for(size_t npos=0,pos=0,ctr=0;ctr<4;ctr++){
npos=initBoundingBox.find_first_of(',',pos);
if(npos==string::npos && ctr<3){
printf("bounding box should be given in format \"x1,y1,x2,y2\",where x's and y's are integer cordinates of opposed corners of bdd box\n");
printf("got: %s\n",initBoundingBox.substr(pos,string::npos).c_str());
printf("manual selection of bounding box will be employed\n");
break;
}
int num=atoi(initBoundingBox.substr(pos,(ctr==3)?(string::npos):(npos-pos)).c_str());
if(num<=0){
printf("bounding box should be given in format \"x1,y1,x2,y2\",where x's and y's are integer cordinates of opposed corners of bdd box\n");
printf("got: %s\n",initBoundingBox.substr(pos,npos-pos).c_str());
printf("manual selection of bounding box will be employed\n");
break;
}
coords[ctr]=num;
pos=npos+1;
}
if(coords[0]>0 && coords[1]>0 && coords[2]>0 && coords[3]>0){
initBoxWasGivenInCommandLine=true;
}
}
//open the capture
VideoCapture cap;
cap.open( video_name );
cap.set( CAP_PROP_POS_FRAMES, start_frame );
if( !cap.isOpened() )
{
help();
cout << "***Could not initialize capturing...***\n";
cout << "Current parameter's value: \n";
parser.printMessage();
return -1;
}
Mat frame;
paused = true;
namedWindow( "Tracking API", 1 );
setMouseCallback( "Tracking API", onMouse, 0 );
//instantiates the specific Tracker
Ptr<Tracker> tracker = Tracker::create( tracker_algorithm );
if( tracker == NULL )
{
cout << "***Error in the instantiation of the tracker...***\n";
return -1;
}
//get the first frame
cap >> frame;
/*Size imageSize,imageSize2;
imageSize = frame.size();
imageSize2 = frame.size();
Rect ROI = Rect(int(imageSize.width/5), int(imageSize.height*0.22),int(imageSize.width*0.6), int(imageSize.height*0.55));
Mat temp = frame.clone();
Mat map1, map2;
initUndistortRectifyMap(Camera_Matrix, Distortion_Coeff, Mat(),
getOptimalNewCameraMatrix(Camera_Matrix, Distortion_Coeff, imageSize, 0.8, imageSize2, 0),
imageSize2, CV_16SC2, map1, map2);
remap(temp, frame, map1, map2, INTER_LINEAR);
Mat image = frame(ROI);
frame = image.clone();*/
// Prabhudev Prakash
pMOG2 = createBackgroundSubtractorMOG2(5000);
// Prabhudev Prakash
frame.copyTo( image );
if(initBoxWasGivenInCommandLine){
selectObject=true;
paused=false;
boundingBox.x = coords[0];
boundingBox.y = coords[1];
boundingBox.width = std::abs( coords[2] - coords[0] );
boundingBox.height = std::abs( coords[3]-coords[1]);
printf("bounding box with vertices (%d,%d) and (%d,%d) was given in command line\n",coords[0],coords[1],coords[2],coords[3]);
rectangle( image, boundingBox, Scalar( 255, 0, 0 ), 2, 1 );
//.........part of the code omitted here.........
Example 11: main
int main(int argc, char *argv[])
{
int listenfd,connfd;
pid_t pid;
socklen_t chilen;
struct sockaddr_in cliaddr,servaddr;
void sig_chld(int signo) ;
if(!cap.isOpened())
{
printf("no camera!");
return -1;
}
cap.set(CV_CAP_PROP_FRAME_WIDTH,640);
cap.set(CV_CAP_PROP_FRAME_HEIGHT,480);
if(!cap1.isOpened())
{
printf("no camera1!");
return -1;
}
cap1.set(CV_CAP_PROP_FRAME_WIDTH,320);
cap1.set(CV_CAP_PROP_FRAME_HEIGHT,240);
if(!cap2.isOpened())
{
printf("no camera2!");
return -1;
}
cap2.set(CV_CAP_PROP_FRAME_WIDTH,320);
cap2.set(CV_CAP_PROP_FRAME_HEIGHT,240);
listenfd=socket(AF_INET,SOCK_STREAM,0);
bzero(&servaddr,sizeof(servaddr));
servaddr.sin_family=AF_INET; // IPV4
servaddr.sin_port=htons(portnumber); // port number (htons converts host to network byte order)
servaddr.sin_addr.s_addr=htonl(INADDR_ANY); // accept connections on any local IP address
bind(listenfd,(sockaddr *)&servaddr,sizeof(sockaddr));
listen(listenfd,10);
signal(SIGCHLD,sig_chld);
for(;;)
{
chilen=sizeof(cliaddr);
if( (connfd=accept(listenfd,(sockaddr *)&cliaddr,&chilen))<0 )
{
if(errno==EINTR)
continue;
else
system("echo accept ");
}
if((pid=fork())==0)
{
close(listenfd);
str_echo(connfd);
close(connfd);
exit(0);
}
close(connfd);
}
}
Example 12: onTrackbarSlide2
void onTrackbarSlide2(int pos, void* userData=NULL){
cap.set(CV_CAP_PROP_POS_FRAMES,pos);
}
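The callback in Example 12 only does something once it is attached to a trackbar. A hedged sketch of the usual wiring follows; the global cap, the window name, and the input path are assumptions, since the surrounding code is omitted:
#include <opencv2/opencv.hpp>
cv::VideoCapture cap;  // assumed global, as in the example above
void onTrackbarSlide2(int pos, void* userData = NULL)
{
    cap.set(CV_CAP_PROP_POS_FRAMES, pos);  // seek to the selected frame
}
int main()
{
    cap.open("video.avi");  // hypothetical input file
    if (!cap.isOpened()) return -1;
    int slider = 0;
    cv::namedWindow("Player");
    cv::createTrackbar("Position", "Player", &slider,
                       (int)cap.get(CV_CAP_PROP_FRAME_COUNT), onTrackbarSlide2);
    cv::Mat frame;
    while (cap.read(frame))
    {
        cv::imshow("Player", frame);
        if (cv::waitKey(33) == 27) break;  // Esc quits
    }
    return 0;
}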
Example 13: main
int main(int argc, char** argv)
{
pthread_t thread_c;
int key = 0; // initialize: key is tested in the loop condition before the first waitKey()
if (argc < 3) {
quit("Usage: netcv_client <server_ip> <server_port> <input_file>(optional)", 0);
}
if (argc == 4) {
capture.open(argv[3]);
} else {
capture.open(0);
}
if (!capture.isOpened()) {
quit("\n--> cvCapture failed", 1);
}
server_ip = argv[1];
server_port = atoi(argv[2]);
capture.set(CV_CAP_PROP_FRAME_WIDTH,320);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,240);
capture >> img0;
img1 = Mat::zeros(img0.rows, img0.cols ,CV_8UC1);
// run the streaming client as a separate thread
if (pthread_create(&thread_c, NULL, streamClient, NULL)) {
quit("\n--> pthread_create failed.", 1);
}
cout << "\n--> Press 'q' to quit. \n\n" << endl;
/* print the width and height of the frame, needed by the client */
cout << "\n--> Transferring (" << img0.cols << "x" << img0.rows << ") images to the: " << server_ip << ":" << server_port << endl;
//namedWindow("stream_client", CV_WINDOW_AUTOSIZE);
//flip(img0, img0, 1);
//cvtColor(img0, img1, CV_BGR2GRAY);
while(key != 'q') {
/* get a frame from camera */
//capture >> img0;
//if (img0.empty()) break;
pthread_mutex_lock(&mutex);
capture >> img0;
if (img0.empty()) break;
//flip(img0, img0, 1);
//cvtColor(img0, img1, CV_BGR2GRAY);
is_data_ready = 1;
pthread_mutex_unlock(&mutex);
/*also display the video here on client */
imshow("stream_client", img2);
key = waitKey(30);
}
/* user has pressed 'q', terminate the streaming client */
if (pthread_cancel(thread_c)) {
quit("\n--> pthread_cancel failed.", 1);
}
/* free memory */
//destroyWindow("stream_client");
quit("\n--> NULL", 0);
return 0;
}
Example 14: focusDriveEnd
static void focusDriveEnd(VideoCapture & cap, int direction)
{
// keep issuing maximal focus steps until set() returns false,
// i.e. the focus motor has reached the end of its travel
while (cap.set(CAP_PROP_ZOOM, (double) MAX_FOCUS_STEP * direction))
;
}
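Example 14 appears to come from an autofocus sample in which CAP_PROP_ZOOM steps the focus motor and set() reports false at the end of travel. A hedged usage sketch follows; MAX_FOCUS_STEP and the capture source are assumptions, and most plain webcams will reject the property immediately:
#include <opencv2/opencv.hpp>
using namespace cv;
const int MAX_FOCUS_STEP = 32767;  // assumed; the original sample defines its own value
static void focusDriveEnd(VideoCapture& cap, int direction)
{
    // step until the backend reports failure, i.e. the end of the focus range
    while (cap.set(CAP_PROP_ZOOM, (double)MAX_FOCUS_STEP * direction))
        ;
}
int main()
{
    VideoCapture cap(0);  // hypothetical camera index
    if (!cap.isOpened()) return -1;
    focusDriveEnd(cap, -1);  // drive focus to the near end...
    focusDriveEnd(cap, 1);   // ...then all the way to the far end
    return 0;
}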
Example 15: main
int main( int argc, char** argv )
{
VideoCapture cap;
cv::CommandLineParser parser(argc, argv, "{help h | | }{ c | 0 | }{ p | | }");
if ( parser.has("help") )
{
help();
return 0;
}
if( parser.get<string>("c").size() == 1 && isdigit(parser.get<string>("c")[0]) )
cap.open(parser.get<int>("c"));
else
cap.open(parser.get<string>("c"));
if( cap.isOpened() )
cout << "Video " << parser.get<string>("c") <<
": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;
if( parser.has("p") )
{
int pos = parser.get<int>("p");
if (!parser.check())
{
parser.printErrors();
return -1;
}
cout << "seeking to frame #" << pos << endl;
cap.set(CAP_PROP_POS_FRAMES, pos);
}
if( !cap.isOpened() )
{
cout << "Could not initialize capturing...\n";
return -1;
}
namedWindow( "Laplacian", 0 );
createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );
Mat smoothed, laplace, result;
for(;;)
{
Mat frame;
cap >> frame;
if( frame.empty() )
break;
int ksize = (sigma*5)|1;
if(smoothType == GAUSSIAN)
GaussianBlur(frame, smoothed, Size(ksize, ksize), sigma, sigma);
else if(smoothType == BLUR)
blur(frame, smoothed, Size(ksize, ksize));
else
medianBlur(frame, smoothed, ksize);
Laplacian(smoothed, laplace, CV_16S, 5);
convertScaleAbs(laplace, result, (sigma+1)*0.25);
imshow("Laplacian", result);
char c = (char)waitKey(30);
if( c == ' ' )
smoothType = smoothType == GAUSSIAN ? BLUR : smoothType == BLUR ? MEDIAN : GAUSSIAN;
if( c == 'q' || c == 'Q' || c == 27 )
break;
}
return 0;
}