本文整理汇总了C++中VideoCapture::get方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoCapture::get方法的具体用法?C++ VideoCapture::get怎么用?C++ VideoCapture::get使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoCapture
的用法示例。
在下文中一共展示了VideoCapture::get方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
int main(int argc, char* argv[])
{
VideoCapture cap;
VideoWriter output;
string inFile = "earth_4_orig.mov";
Mat frame1, frame2, NewFrame;
int ver = 2;
int hor = 2;
int frameCount = 1;
bool quietMode = false;
bool reportMode = false;
bool displayMode = false;
if(argc > 1)
{
for(int i = 1; i < argc; ++i)
{
if(strcmp(argv[i], "-f") == 0)
{
inFile = string(argv[++i]);
}
else if(strcmp(argv[i], "-v") == 0)
{
ver = atoi(argv[++i]);
}
else if(strcmp(argv[i], "-h") == 0)
{
hor = atoi(argv[++i]);
}
else if(strcmp(argv[i], "-q") == 0)
{
quietMode = true;
}
else if(strcmp(argv[i], "-r") == 0)
{
reportMode = true;
}
else if(strcmp(argv[i], "-d") == 0)
{
displayMode = true;
}
else
{
cout << "Invalid argument " << argv[i] << endl;
printUsage();
}
}
}
else
{
printUsage();
return -1;
}
cap.open(inFile);
int maxFrame = cap.get(CV_CAP_PROP_FRAME_COUNT);
int origWid = cap.get(CV_CAP_PROP_FRAME_WIDTH);
int origHei = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
if(!cap.isOpened())
{
printf("!!! cvCaptureFromAVI failed (file not found?)\n");
return -1;
}
int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
Size S = Size((int)cap.get(CV_CAP_PROP_FRAME_WIDTH) -ver , (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT)-hor);
//char key = 0;
int first = 1;
int last = 0;
NewFrame = Mat::zeros(S, CV_32F);
string::size_type pAt = inFile.find_last_of('.'); // Find extension point
const string outFile = inFile.substr(0, pAt) + "-basic.mov";
output.open(outFile, ex, cap.get(CV_CAP_PROP_FPS), S, true);
clock_t startTime = clock();
if(quietMode == false)
cout << "Processing " << maxFrame << " frames..." << endl;
//int fps = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
while (/*key != 'q' && */ !last)
{
if(first ==1 )
{
cap >> frame1;
if (frame1.empty())
{
printf("!!! cvQueryFrame failed: no frame\n");
break;
}
first = 0;
continue;
}
else
{
示例2: main
int main(int argc, const char** argv)
{
VideoCapture vc;
std::vector<char> s(4096);
if (!vc.open(VIDEO_FILE)) {
CV_Error_(-1, ("failed to open video: \"%s\"", VIDEO_FILE));
std::exit(1);
}
int key = 0;
bool pause = false;
Point selection(-1000, -1000);
Mat pristine, a, b;
namedWindow(WINDOW_NAME, CV_WINDOW_NORMAL);
resizeWindow(WINDOW_NAME,
(int)vc.get(CV_CAP_PROP_FRAME_WIDTH),
(int)vc.get(CV_CAP_PROP_FRAME_HEIGHT));
setMouseCallback(WINDOW_NAME, onMouse, &selection);
while (true)
{
if ((unsigned long) cvGetWindowHandle(WINDOW_NAME) == 0UL) {
break;
}
if (!pause) {
if (! vc.read(pristine)) {
vc.set(CV_CAP_PROP_POS_FRAMES, 0U);
vc.read(pristine);
};
}
pristine.copyTo(a);
Mat& post = a;
rectangle(post,
selection - SELECT_HALF_SIZE,
selection + SELECT_HALF_SIZE,
Scalar(0,255,0), SELECT_LINE_WIDTH);
std::sprintf(&s[0], "CNT: %5u", (unsigned) vc.get(CV_CAP_PROP_FRAME_COUNT));
putText(post,
&s[0],
Point(vc.get(CV_CAP_PROP_FRAME_WIDTH)-200,TEXT_LINE_PITCH * 1),
FONT_HERSHEY_PLAIN,
1,
Scalar(255,255,255));
std::sprintf(&s[0], "F#: %5u", (unsigned) vc.get(CV_CAP_PROP_POS_FRAMES));
putText(post,
&s[0],
Point(vc.get(CV_CAP_PROP_FRAME_WIDTH)-200,TEXT_LINE_PITCH * 2),
FONT_HERSHEY_PLAIN,
1,
Scalar(255,255,255));
imshow(WINDOW_NAME, post);
key = waitKey(1);
if (key == 27) {
break;
}
else if (key == 32) {
pause = !pause;
}
if (key != -1) {
std::cerr << "key=" << key << std::endl;
}
}
vc.release();
return 0;
}
示例3: FrameMessage
// Print three position properties of the capture on one line.
// property(0..2) is a project-defined mapping — presumably POS_MSEC,
// POS_FRAMES and POS_AVI_RATIO in that order; confirm against its definition.
void skizImage::FrameMessage(VideoCapture cap)
{
    const double seconds  = cap.get(property(0)) / 1000;  // milliseconds -> seconds
    const double frameIdx = cap.get(property(1));
    const double aviRatio = cap.get(property(2));
    printf("MSEC = %lfs\t", seconds);
    printf("FRAMES=%lf\t", frameIdx);
    printf("AVI_RATIO=%lf\n", aviRatio);
}
示例4: main
int main(int argc, char** argv)
{
// variable initialization
int keyInput = 0;
int nFrames = 0, nSmoothFrames = 0, nFailedFrames = 0, nBlindFrames = 0;
int lastDx = 0, lastDy = 0;
bool bOverlay = true; // plot overlay?
bool bTrace = true & bOverlay; // plot 'bubble' trace? (only when overlay active)
Ptr<BackgroundSubtractor> pMOG2;
VideoCapture capture; // input video capture
VideoWriter outputVideo; // output video writer
Mat curFrame, // current original frame
fgMaskMOG2, // foreground mask from MOG2 algorithm
bgImg, // container for background image from MOG2
grayFrame, // grayscale conversion of original frame
frameDil, // dilated grayscale frame
canny_out; // output of Canny algorithm for shape outline detection
Mat *pOutMat = &curFrame; // pointer to image that will be rendered once per input video frame
Mat strucElem = getStructuringElement(MORPH_RECT, Size(3, 3)); // dilatation base element
// containers for output of findContours()
vector<Mat> contours;
vector<Vec4i> hierarchy;
// read video input filename from command line and construct output filename
if (argc < 2) {
cerr << "Please provide input video filename." << endl;
return EXIT_FAILURE;
}
string filename(argv[1]);
string outName = filename.substr(0, filename.length() - 4) + "_out.avi";
Rect lastKnownRect, lastRect;
Point lastKnownPos, lastPos, estimatePos, plotPos;
list<Point> lastKnownPositions;
// init 'live' video output window
namedWindow("Motion tracking");
// try to open input file
capture.open(filename);
if (!capture.isOpened()) {
cerr << "Unable to open file '" << filename << "'." << endl;
return EXIT_FAILURE;
} else {
cout << "Successfully opened file '" << filename << "'." << endl;
}
// try to write to output file
Size vidS = Size((int)capture.get(CV_CAP_PROP_FRAME_WIDTH), (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT));
outputVideo.open(outName, CV_FOURCC('P','I','M','1'), capture.get(CV_CAP_PROP_FPS), vidS, true);
if (!outputVideo.isOpened()) {
cerr << "Unable to write to output video." << endl;
return EXIT_FAILURE;
}
// build frame buffer and background subtractor
pMOG2 = createBackgroundSubtractorMOG2(500, 30., true);
// main loop over frames
while (capture.read(curFrame) && (char)keyInput != 'q')
{
++nFrames;
cvtColor(curFrame, grayFrame, CV_BGR2GRAY); // convert to grayscale
threshold(grayFrame, grayFrame, 128., 0., CV_THRESH_TRUNC); // try to mitigate (white) reflections by truncating the current frame
GaussianBlur(grayFrame, grayFrame, Size(7, 7), 0, 0);
pMOG2->apply(grayFrame, fgMaskMOG2);
// erode and dilate to remove some noise
erode(fgMaskMOG2, frameDil, strucElem);
dilate(frameDil, frameDil, strucElem);
// dilate and erode to remove holes from foreground
dilate(frameDil, frameDil, strucElem);
erode(frameDil, frameDil, strucElem);
// canny to find foreground outlines
Canny(frameDil, canny_out, 100, 200, 3);
// find contours, sort by contour size (descending)
findContours(canny_out, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); // find contours
sort(contours.begin(), contours.end(), rvs_cmp_contour_area); // sort by contour area, beginning with the largest
// determine largest "moving" object
int iMaxSize = 0;
bool bFoundCloseContour = false;
for (unsigned int i = 0; i < contours.size(); i++)
{
if (contourArea(contours[i]) < CONTOUR_AREA_THRESH) // ignore contours which are too small (noise)
break;
// ignore contours which are too far away from the last frame
Rect boun = boundingRect(contours[i]); // bounding rect
//.........这里部分代码省略.........
示例5: main
/**
 * @brief Demo entry point: grabs frames from a camera (or a video file),
 *        runs vehicle_detection_system() on each one and displays the
 *        result. 's'/'S' snapshots the frame (colour + greyscale copy),
 *        ESC quits.
 *
 * Relies on file-scope globals declared elsewhere in this file:
 * camera_device_index, totalFrameNumber, frameToStart, frameToStop,
 * rate, currentFrame, delay, frame, capture_color_image,
 * capture_gray_image, and the helpers HelpMain/HelpSelectCamera/
 * HelpCaptureImage/vehicle_detection_system.
 *
 * @return 0 on success, -1 if no capture source could be opened
 */
int main(void)
{
    HelpMain();
    HelpSelectCamera();
    VideoCapture cap;
    if (camera_device_index != -1) {
        /// open the camera device chosen by HelpSelectCamera()
        cap = VideoCapture(camera_device_index);
    } else {
        /// open a video file whose path is read from stdin
        cout << "please input video name with pathname: ";
        String video_name;
        cin >> video_name;
        cout << "the video name is :" << video_name << endl;
        cap = VideoCapture(video_name);
    }
    if(!cap.isOpened()) {
        cout << "Can not find the default camera from you computer!\n";
        cin.get();
        return -1;
    }
    /// wait for camera to get ready
    waitKey(2000);
    /// video information (stored in file-scope globals)
    totalFrameNumber = cap.get(CV_CAP_PROP_FRAME_COUNT);
    frameToStart = 30;
    frameToStop = 140;
    rate = cap.get(CV_CAP_PROP_FPS);
    currentFrame = frameToStart;
    // ms per frame. NOTE(review): divides by zero if the stream reports
    // no FPS (rate == 0) — confirm upstream guarantees a valid rate.
    delay = 1000/rate;
    if (camera_device_index == -1) {
        // seeking only makes sense for file input, not a live camera
        cap.set( CV_CAP_PROP_POS_FRAMES,frameToStart);
    }
    /// read a frame to get the camera image state
    cap.read(frame);
    resize(frame, frame, Size(640,480));
    cout << "image height = " << frame.rows << endl;
    cout << "image width = " << frame.cols << endl;
    cout << "image channel = " << frame.channels() << endl;
    imshow("camera", frame);
    cout << "camera/video open success\n";
    waitKey(30);
    HelpCaptureImage();
    /// start showing the camera stream
    char key = -1;
    for(;;) {
        Mat frame;  // NOTE: local, shadows the global `frame` used above
        cap.read(frame);
        frame = vehicle_detection_system(frame);
        currentFrame++;
        imshow("camera", frame);
        key = waitKey(30);
        /// exit program on ESC
        if(key == 27) {
            destroyAllWindows();
            break;
        }
        /// capture image: keep a colour clone plus a greyscale conversion
        if(key == 'S' || key == 's') {
            capture_color_image = frame.clone();
            cvtColor(capture_color_image, capture_gray_image,
                     cv::COLOR_BGR2GRAY);
            // imshow("color", capture_color_image);
            // imshow("gray", capture_gray_image);
            // imwrite("color.bmp", capture_color_image);
            // imwrite("gray.bmp", capture_gray_image);
        }
    }
    cap.release();
    return 0;
}
示例6: VideoDemos
void VideoDemos( VideoCapture& surveillance_video, int starting_frame, bool clean_binary_images )
{
Mat previous_gray_frame, optical_flow, optical_flow_display;
Mat current_frame, thresholded_image, closed_image, first_frame;
Mat current_frame_gray, running_average_background;
Mat temp_running_average_background, running_average_difference;
Mat running_average_foreground_mask, running_average_foreground_image;
Mat selective_running_average_background;
Mat temp_selective_running_average_background, selective_running_average_difference;
Mat selective_running_average_foreground_mask, selective_running_average_background_mask, selective_running_average_foreground_image;
double running_average_learning_rate = 0.01;
surveillance_video.set(CV_CAP_PROP_POS_FRAMES,starting_frame);
surveillance_video >> current_frame;
first_frame = current_frame.clone();
cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);
current_frame.convertTo(running_average_background, CV_32F);
selective_running_average_background = running_average_background.clone();
int rad = running_average_background.depth();
MedianBackground median_background( current_frame, (float) 1.005, 1 );
Mat median_background_image, median_foreground_image;
int codec = static_cast<int>(surveillance_video.get(CV_CAP_PROP_FOURCC));
// V3.0.0 update on next line. OLD CODE was BackgroundSubtractorMOG2 gmm; //(50,16,true);
Ptr<BackgroundSubtractorMOG2> gmm = createBackgroundSubtractorMOG2();
Mat foreground_mask, foreground_image = Mat::zeros(current_frame.size(), CV_8UC3);
double frame_rate = surveillance_video.get(CV_CAP_PROP_FPS);
double time_between_frames = 1000.0/frame_rate;
Timestamper* timer = new Timestamper();
int frame_count = 0;
while ((!current_frame.empty()) && (frame_count++ < 1000))//1800))
{
double duration = static_cast<double>(getTickCount());
vector<Mat> input_planes(3);
split(current_frame,input_planes);
cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);
if (frame_count%2 == 0) // Skip every second frame so the flow is greater.
{
if ( previous_gray_frame.data )
{
Mat lucas_kanade_flow;
timer->ignoreTimeSinceLastRecorded();
LucasKanadeOpticalFlow(previous_gray_frame, current_frame_gray, lucas_kanade_flow);
timer->recordTime("Lucas Kanade Optical Flow");
calcOpticalFlowFarneback(previous_gray_frame, current_frame_gray, optical_flow, 0.5, 3, 15, 3, 5, 1.2, 0);
cvtColor(previous_gray_frame, optical_flow_display, CV_GRAY2BGR);
drawOpticalFlow(optical_flow, optical_flow_display, 8, Scalar(0, 255, 0), Scalar(0, 0, 255));
timer->recordTime("Farneback Optical Flow");
char frame_str[100];
sprintf( frame_str, "Frame = %d", frame_count);
Mat temp_output = JoinImagesHorizontally( current_frame, frame_str, optical_flow_display, "Farneback Optical Flow", 4 );
Mat optical_flow_output = JoinImagesHorizontally( temp_output, "", lucas_kanade_flow, "Lucas Kanade Optical Flow", 4 );
imshow("Optical Flow", optical_flow_output );
}
std::swap(previous_gray_frame, current_frame_gray);
}
// Static background image
Mat difference_frame, binary_difference;
Mat structuring_element(3,3,CV_8U,Scalar(1));
timer->ignoreTimeSinceLastRecorded();
absdiff(current_frame,first_frame,difference_frame);
cvtColor(difference_frame, thresholded_image, CV_BGR2GRAY);
threshold(thresholded_image,thresholded_image,30,255,THRESH_BINARY);
if (clean_binary_images)
{
morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
morphologyEx(closed_image,binary_difference,MORPH_OPEN,structuring_element);
current_frame.copyTo(binary_difference, thresholded_image);
}
else
{
binary_difference.setTo(Scalar(0,0,0));
current_frame.copyTo(binary_difference, thresholded_image);
}
timer->recordTime("Static difference");
// Running Average (three channel version)
vector<Mat> running_average_planes(3);
split(running_average_background,running_average_planes);
accumulateWeighted(input_planes[0], running_average_planes[0], running_average_learning_rate);
accumulateWeighted(input_planes[1], running_average_planes[1], running_average_learning_rate);
accumulateWeighted(input_planes[2], running_average_planes[2], running_average_learning_rate);
merge(running_average_planes,running_average_background);
running_average_background.convertTo(temp_running_average_background,CV_8U);
absdiff(temp_running_average_background,current_frame,running_average_difference);
split(running_average_difference,running_average_planes);
// Determine foreground points as any point with a difference of more than 30 on any one channel:
threshold(running_average_difference,running_average_foreground_mask,30,255,THRESH_BINARY);
split(running_average_foreground_mask,running_average_planes);
bitwise_or( running_average_planes[0], running_average_planes[1], running_average_foreground_mask );
bitwise_or( running_average_planes[2], running_average_foreground_mask, running_average_foreground_mask );
if (clean_binary_images)
{
morphologyEx(running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
morphologyEx(closed_image,running_average_foreground_mask,MORPH_OPEN,structuring_element);
}
running_average_foreground_image.setTo(Scalar(0,0,0));
current_frame.copyTo(running_average_foreground_image, running_average_foreground_mask);
//.........这里部分代码省略.........
示例7: fps
/// Frames-per-second reported by the underlying capture `video`.
inline double fps()
{
    return video.get(CV_CAP_PROP_FPS);
}
示例8: main
int main(int argc, char *argv[]) {
ros::init(argc, argv, "verify_tracking_node");
ros::NodeHandle n;
std::string port;
ros::param::param<std::string>("~port", port, "/dev/ttyACM0");
int baud;
ros::param::param<int>("~baud", baud, 57600);
ros::Rate loop_rate(10);
ros::Publisher servox_pub = n.advertise<std_msgs::Char>("servox_chatter", 1000);
ros::Publisher servoy_pub = n.advertise<std_msgs::Char>("servoy_chatter", 1000);
ros::Publisher motor_pub = n.advertise<std_msgs::Char>("motor_chatter", 1000);
ros::Publisher verify_pub = n.advertise<std_msgs::Char>("verify_chatter", 1);
Subscriber track_pub = n.subscribe("track_chatter", 1, trackCallback);
Subscriber host_sub = n.subscribe("host_chatter", 1, hostCallback);
cv_result_t cv_result = CV_OK;
int main_return = -1;
cv_handle_t handle_detect = NULL;
cv_handle_t handle_track = NULL;
VideoCapture capture;
double time;
capture.open(0); // open the camera
if (!capture.isOpened()) {
fprintf(stderr, "Verify track can not open camera!\n");
return -1;
}
capStatus = OPEN;
int frame_width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
int frame_height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
int frame_half_width = frame_width >> 1;
int frame_half_height = frame_height >> 1;
//printf("width %d height %d \n", frame_width, frame_height);
Point expect(frame_half_width , frame_half_height);
struct timeval start0, end0;
struct timeval start1, end1;
struct timeval start2, end2;
struct timeval start3, end3;
struct timeval start4, end4;
struct timeval start5, end5;
#ifdef TIME
gettimeofday(&start0, NULL);
#endif
cv_handle_t handle_verify = cv_verify_create_handle("data/verify.tar");
#ifdef TIME
gettimeofday(&end0, NULL);
time = COST_TIME(start0, end0);
printf("get from verify tar time cost = %.2fs \n", time / 1000000);
#endif
#if 1
const int person_number = 5;
Mat p_image_color_1[person_number], p_image_color_color_1[person_number], p_image_color_2, p_image_color_color_2;
Mat tmp_frame;
cv_face_t *p_face_1[person_number];
cv_face_t *p_face_2;
int face_count_1[person_number] = {0};
int face_count_2 = 0;
cv_feature_t *p_feature_1[person_number];
cv_feature_t *p_feature_new_1[person_number];
unsigned int feature_length_1[person_number];
p_image_color_1[0] = imread("00.JPG");
p_image_color_1[1] = imread("01.JPG");
p_image_color_1[2] = imread("02.JPG");
p_image_color_1[3] = imread("03.JPG");
p_image_color_1[4] = imread("04.JPG");
char *string_feature_1[person_number];
#else
Mat p_image_color_2, p_image_color_color_2;
const int person_number = 4;
cv_face_t *p_face_2 = NULL;
vector<cv_face_t *>p_face_1(person_number,NULL);
vector<int>face_count_1(person_number, 0);
int face_count_2 = 0;
vector<Mat>p_image_color_1(person_number);
vector<Mat>p_image_color_color_1(person_number);
vector<cv_feature_t *>p_feature_1(person_number, NULL);
vector<cv_feature_t *>p_feature_new_1(person_number, NULL);
vector<unsigned int>feature_length_1(person_number, 0);
// load image
p_image_color_1.push_back(imread("01.JPG"));
p_image_color_1.push_back(imread("02.JPG"));
p_image_color_1.push_back(imread("03.JPG"));
p_image_color_1.push_back(imread("04.JPG"));
char *string_feature_1[person_number];
#endif
for(int i = 0; i < person_number; i++)
{
if (!p_image_color_1[i].data ) {
fprintf(stderr, "fail to read %d image \n", i);
//return -1;
goto RETURN;
}
}
for(int i = 0; i < person_number; i++)
cvtColor(p_image_color_1[i], p_image_color_color_1[i], CV_BGR2BGRA);
// init detect handle
//.........这里部分代码省略.........
示例9: main
/**
 * Captures frames from the default camera, histogram-equalises each BGR
 * channel, computes per-channel 64-bin histograms and overlays them
 * stacked in the top-left corner of the live view. Press any key to quit.
 *
 * Fix vs. original: the loop now stops cleanly when an empty frame is
 * returned (camera unplugged / end of stream); previously resize() would
 * be called on an empty Mat and abort.
 *
 * @return 0 on normal exit, -1 if no camera could be opened
 */
int main(int argc, char** argv){
    Mat image;
    int width, height;
    VideoCapture cap;
    vector<Mat> planes;          // B, G, R planes produced by split()
    Mat histR, histG, histB;     // per-channel histograms
    int nbins = 64;
    float range[] = {0, 256};
    const float *histrange = { range };
    bool uniform = true;
    bool accumulate = false;     // recompute the histogram on every frame

    cap.open(0); // select the default camera
    if(!cap.isOpened()){
        cout << "cameras indisponiveis";
        return -1;
    }
    width = cap.get(CV_CAP_PROP_FRAME_WIDTH);
    height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
    cout << "largura = " << width << endl;
    cout << "altura = " << height << endl;

    // Histogram overlay canvases: nbins wide, nbins/2 tall, one per channel.
    int histw = nbins, histh = nbins/2;
    Mat histImgR(histh, histw, CV_8UC3, Scalar(0,0,0));
    Mat histImgG(histh, histw, CV_8UC3, Scalar(0,0,0));
    Mat histImgB(histh, histw, CV_8UC3, Scalar(0,0,0));

    while(1){
        cap >> image;
        if(image.empty())        // stream ended or camera failed: stop cleanly
            break;
        // Downscale the capture to a fixed working size.
        resize(image, image, Size(640, 360));
        // --- Equalisation ------------------------------------------------
        // Split into B, G, R planes, equalise each, then merge back.
        split (image, planes);
        equalizeHist(planes[0], planes[0]);
        equalizeHist(planes[1], planes[1]);
        equalizeHist(planes[2], planes[2]);
        merge(planes, image);
        // --- Histograms --------------------------------------------------
        // NOTE(review): histR is computed from planes[0] (blue in BGR) yet
        // drawn in red, and vice versa for histB — verify the intended
        // channel/colour mapping.
        calcHist(&planes[0], 1, 0, Mat(), histR, 1,
                 &nbins, &histrange,
                 uniform, accumulate);
        calcHist(&planes[1], 1, 0, Mat(), histG, 1,
                 &nbins, &histrange,
                 uniform, accumulate);
        calcHist(&planes[2], 1, 0, Mat(), histB, 1,
                 &nbins, &histrange,
                 uniform, accumulate);
        // Scale bin counts to fit the overlay height.
        normalize(histR, histR, 0, histImgR.rows, NORM_MINMAX, -1, Mat());
        normalize(histG, histG, 0, histImgG.rows, NORM_MINMAX, -1, Mat());
        normalize(histB, histB, 0, histImgB.rows, NORM_MINMAX, -1, Mat());
        histImgR.setTo(Scalar(0));
        histImgG.setTo(Scalar(0));
        histImgB.setTo(Scalar(0));
        // Draw one vertical line per bin.
        for(int i=0; i<nbins; i++){
            line(histImgR,
                 Point(i, histh),
                 Point(i, histh-cvRound(histR.at<float>(i))),
                 Scalar(0, 0, 255), 1, 8, 0);
            line(histImgG,
                 Point(i, histh),
                 Point(i, histh-cvRound(histG.at<float>(i))),
                 Scalar(0, 255, 0), 1, 8, 0);
            line(histImgB,
                 Point(i, histh),
                 Point(i, histh-cvRound(histB.at<float>(i))),
                 Scalar(255, 0, 0), 1, 8, 0);
        }
        // Blit the three overlays stacked in the top-left corner.
        histImgR.copyTo(image(Rect(0, 0 ,nbins, histh)));
        histImgG.copyTo(image(Rect(0, histh ,nbins, histh)));
        histImgB.copyTo(image(Rect(0, 2*histh ,nbins, histh)));
        imshow("image", image);
        if(waitKey(30) >= 0) break;  // any key quits
    }
    return 0;
}
示例10: main
//.........这里部分代码省略.........
cout << scale << "\t" << pitch << "\t" << start_frame << endl;
Mat r1, r2, t1, t2;
Mat prev_r(3, 1, CV_64FC1);
Mat prev_prev_r(3, 1, CV_64FC1);
Mat prev_t(3, 1, CV_64FC1);
Mat prev_prev_t(3, 1, CV_64FC1);
double kkk = 0.7;
int counter = 0;
Mat frame;
for ( ;; ) {
cap >> frame;
if (frame.empty())
return 0;
char* speed = NULL;
velocity_world = detectSpeed(frame, &speed) / 3.6f; // m/s
if (velocity_world>0) {
SKIPPED_FRAMES = k_factor/velocity_world;
if (SKIPPED_FRAMES < 2)
SKIPPED_FRAMES = 2;
else if (SKIPPED_FRAMES > 15)
SKIPPED_FRAMES = 15;
}
else
SKIPPED_FRAMES = 15;
for (int i = 1; i < SKIPPED_FRAMES; i++) {
cap >> frame;
if (frame.empty())
return 0;
}
cout << "speed: " << velocity_world << endl;
unsigned int curr_frame = (unsigned int)cap.get(CV_CAP_PROP_POS_FRAMES);
// status
cout << "Processing: Frame: " << curr_frame << endl;// cap.get(CV_CAP_PROP_POS_FRAMES) << endl;
// catch image read/write errors here
resize(frame, frame, Size(0.5*frame.cols, 0.5*frame.rows));
// undistorted image
clearNearbyOpticalflow(&frame, 420);
cvtColor(frame, gray_frame, COLOR_BGR2GRAY);
int width = gray_frame.cols;
int height = gray_frame.rows;
// convert input images to uint8_t buffer
uint8_t* gray_frame_data = (uint8_t*)malloc(width*height*sizeof(uint8_t));
int32_t k=0;
for (int32_t v=0; v<height; v++) {
for (int32_t u=0; u<width; u++) {
gray_frame_data[k++] = gray_frame.at<char>(v, u);
}
}
vector<Matcher::p_match> p_matched = viso.getMatches();
// compute visual odometry
int32_t dims[] = {width,height,width};
if (viso.process(gray_frame_data,dims)) {
// on success, update current pose
// Matrix pose_tmp = Matrix::eye(4);
// pose_tmp = pose * Matrix::inv(viso.getMotion());
// float fl_pre[16] = {0};
// float fl_cur[16] = {0};
// for (int i = 0; i < 4; i ++ )
// for (int j = 0; j < 4; j ++) {
// fl_pre[i*4+j] = pose.val[i][j];
示例11: main
int main()
{
VideoCapture capture = VideoCapture(CV_CAP_OPENNI);
Size size = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH),capture.get(CV_CAP_PROP_FRAME_HEIGHT));
int codec = CV_FOURCC('D', 'I', 'V', 'X');
VideoWriter writer("video.avi",codec,capture.get(CV_CAP_PROP_FPS),size,0);
namedWindow( "COLOR", CV_WINDOW_AUTOSIZE );
namedWindow( "wireframe", CV_WINDOW_AUTOSIZE );
namedWindow( "FILT", CV_WINDOW_AUTOSIZE );
namedWindow( "BlobCenters", CV_WINDOW_AUTOSIZE );
moveWindow("COLOR", 10, 10);
moveWindow("wireframe", 710, 10);
moveWindow("FILT", 10, 540);
moveWindow("BlobCenters", 710, 540);
if(writer.isOpened())
{
Mat depthMap;
Mat bgrImage ;
Mat filtered;
Mat filtered2;
Point centerOfHand;
//Motion History Mats
Mat blobCenters = Mat::zeros(size,CV_8U);
imshow("BlobCenters",blobCenters);
int prevX, prevY = -1;
vector<Point> scatter;
vector<Point> scatter1;
scatter1.push_back(Point(200,300));
scatter1.push_back(Point(210,310));
scatter1.push_back(Point(220,320));
scatter1.push_back(Point(230,330));
scatter1.push_back(Point(240,340));
bool foundHand;
clock_t gestureTimer;
seconds_count=0;
int X_Displacement=0;
int Y_Displacement=0;
while ( 1 )
{
capture.grab();
capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP );
capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE );
//imshow("depthmap",depthMap);
//Find the minimum value greater than 0 in the matrix
//TEST SECTION
flip(depthMap,depthMap,1);
flip(bgrImage,bgrImage,1);
MatConstIterator_<unsigned short> it = depthMap.begin<unsigned short>(), it_end = depthMap.end<unsigned short>();
unsigned short minVal=60000;
for(;it != it_end; ++it){
if(*it<minVal && *it>0){
minVal=*it;
}
}
//cout << "minVal: " <<minVal<<endl;
unsigned short minRange = minVal-30;
unsigned short maxRange = minVal+60;
//cout << "min,max: "<<minRange<<", "<<maxRange<<endl;
//Perhaps just create another mat with size 8u. This seems to be what happens when
Mat thtwBitDepth;// = cvCreateImage(size,IPL_DEPTH_32F,0);
depthMap.convertTo(thtwBitDepth,CV_32F);//,1.0/256,0);
//imshow("32 Bit",thtwBitDepth);
filtered2 = thresholdDistance(thtwBitDepth,minRange,maxRange);
filtered2 = thresholdDistance(filtered2,25,900);
//imshow("ThresholdDistance",filtered2);
//END TEST SECTION
//inRange(depthMap,25,800,filtered);
//filtered2 = filtered.clone();
filtered2 = smoothImage(filtered2);
imshow("FILT",filtered2);
//.........这里部分代码省略.........
示例12: main
int main(int argc, char* argv[])
{
VideoCapture cap;
VideoWriter output;
string inFile = "88_7_orig.mov";
int ver = 2;
int hor = 2;
int frameCount = 1;
int maxFrames;
bool quietMode = false;
bool reportMode = false;
bool displayMode = false;
char *numWorkers = NULL;
struct timeval startTime, endTime;
if(argc > 1)
{
for(int i = 1; i < argc; ++i)
{
if(strcmp(argv[i], "-f") == 0)
{
inFile = argv[++i];
}
else if(strcmp(argv[i], "-h") == 0)
{
hor = atoi(argv[++i]);
}
else if(strcmp(argv[i], "-w") == 0)
{
numWorkers = argv[++i];
}
else if(strcmp(argv[i], "-v") == 0)
{
ver = atoi(argv[++i]);
}
else if(strcmp(argv[i], "-q") == 0)
{
quietMode = true;
}
else if(strcmp(argv[i], "-r") == 0)
{
reportMode = true;
}
else if(strcmp(argv[i], "-d") == 0)
{
displayMode = true;
}
else
{
cerr << "Unknown flag: " << argv[i] << endl;
printUsage();
}
}
}
else
{
printUsage();
return -1;
}
if(numWorkers == NULL)
numWorkers = (char *)"2";
if (0!= __cilkrts_set_param("nworkers", numWorkers))
{
printf("Failed to set worker count\n");
return 1;
}
cap.open(inFile);
if(!cap.isOpened())
{
cerr << "Unable to open input file." << endl;
return -1;
}
maxFrames = cap.get(CV_CAP_PROP_FRAME_COUNT);
int origWid = cap.get(CV_CAP_PROP_FRAME_WIDTH);
int origHei = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
Size S = Size((int)cap.get(CV_CAP_PROP_FRAME_WIDTH) -ver , (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT)-hor);
//char key = 0;
int first = 1, second = 1, third = 1;
int last = 0;
string::size_type pAt = inFile.find_last_of('.'); // Find extension point
const string outFile = inFile.substr(0, pAt) + "-temp4.mov";
output.open(outFile, ex, cap.get(CV_CAP_PROP_FPS), S, true);
Mat *frames = new Mat[maxFrames];
Mat *outFrames = new Mat[maxFrames];
for(int i = 0; i < maxFrames; ++i)
{
cap >> frames[i];
if(frames[i].empty())
{
cout << "Error: unable to read frame " << i << endl;
return 1;
//.........这里部分代码省略.........
示例13: frame_num
/// Total number of frames in the clip (CV_CAP_PROP_FRAME_COUNT).
/// NOTE(review): despite the name, this is the frame *count*, not the
/// current frame index (CV_CAP_PROP_POS_FRAMES) — confirm callers expect that.
inline unsigned frame_num()
{
    return video.get(CV_CAP_PROP_FRAME_COUNT);
}
示例14: size
/// Frame dimensions (width x height) reported by the capture `video`.
inline Size size()
{
    const double w = video.get(CV_CAP_PROP_FRAME_WIDTH);
    const double h = video.get(CV_CAP_PROP_FRAME_HEIGHT);
    return Size(w, h);
}
示例15: openVideo
/**
 * Plays the video in fileName inside videoGraphicsview, from frame 300
 * to frame 400 (or until ESC), updating progressBar as it goes.
 * Uses the member capture `cap`, member `frame` and member `currentFrame`.
 *
 * Fixes vs. original:
 *  - a new QGraphicsScene was leaked on every frame; it is now created
 *    once, parented to the view so Qt reclaims it;
 *  - delay no longer divides by zero when the stream reports no FPS.
 */
void fichierControl::openVideo(QString &fileName, QGraphicsView *videoGraphicsview, QProgressBar *progressBar)
{
    bool stop = false;
    if (!cap.open(fileName.toStdString())){
        cout << "Cannot read the frame from video file" << endl;
    }
    // Total number of frames in the file.
    long totalFrameNumber = cap.get(CV_CAP_PROP_FRAME_COUNT);
    cout<<"Number of frame"<<totalFrameNumber<<endl;
    // Start playback at frame 300 (a frame index, not milliseconds).
    long frameToStart = 300;
    cap.set( CV_CAP_PROP_POS_FRAMES,frameToStart);
    cout<<"Frame to start"<<frameToStart<<endl;
    // Stop playback at frame 400.
    int frameToStop = 400;
    if(frameToStop < frameToStart)
    {
        cout<<"Frametostop smaller than frametostart!"<<endl;
    }
    else
    {
        cout<<"Frame to stop"<<frameToStop<<endl;
    }
    // Frames per second; guard against streams that report 0 to avoid a
    // division by zero (fall back to ~30 fps).
    double rate = cap.get(CV_CAP_PROP_FPS);
    cout<<"the frames per seconds"<<rate<<endl;
    int delay = (rate > 0) ? (int)(1000/rate) : 33;
    currentFrame = frameToStart;
    // Progress bar tracks the playback window [frameToStart, frameToStop].
    progressBar->setMinimum(frameToStart);
    progressBar->setMaximum(frameToStop);
    //namedWindow("MyVideo",WINDOW_NORMAL); //create a window called "MyVideo"
    //resizeWindow("MyVideo", 400, 300);
    //Create Trackbar
    /*if(totalFrameNumber != 0){
        createTrackbar("Position", "MyVideo", &currentFrame, totalFrameNumber, tbCallback, &frame);
    }*/
    // Create the scene once, parented to the view so it is freed with it.
    QGraphicsScene *scene = new QGraphicsScene(videoGraphicsview);
    videoGraphicsview->setScene(scene);
    while(!stop)
    {
        bool bSuccess = cap.read(frame); // read a new frame from video
        if (!bSuccess) // on failure the previous frame is redisplayed
        {
            cout << "Cannot read the frame from video file" << endl;
        }
        if(frame.data){
            cvtColor(frame, frame, CV_BGR2RGB); //Qt expects RGB, OpenCV delivers BGR
        }else{
            cout << "Frame no data" << endl;
        }
        // Convert to QImage and downscale for display in the view.
        QImage image = QImage((uchar*)(frame.data), frame.cols, frame.rows, frame.step, QImage::Format_RGB888);
        QImage result = image.scaled(800,600).scaled(495,325, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
        scene->clear();
        scene->addPixmap(QPixmap::fromImage(result));
        videoGraphicsview->show();
        cout<<"currentFrame"<<currentFrame<<endl;
        //imshow("MyVideo", frame); //show the frame in "MyVideo" window
        // ESC aborts; reaching frameToStop ends playback normally.
        if(waitKey(delay) == 27 || currentFrame >= frameToStop)
        {
            cout << "esc key is pressed by user" << endl;
            stop = true;
        }
        // Any other key pauses until the next key press.
        if( waitKey(delay) >= 0)
        {
            waitKey(0);
        }
        currentFrame++;
        progressBar->setValue(currentFrame);
        //setTrackbarPos("Position", "MyVideo",currentFrame);
    }
    //Close video file
    cap.release();
    waitKey(0);
}