This article collects typical usage examples of the C++ method VideoCapture::read. If you have been wondering how VideoCapture::read is used in C++ and what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, VideoCapture.
The following presents 15 code examples of VideoCapture::read, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
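Before the full examples, here is a minimal, self-contained sketch of the typical read() loop (assuming the default camera at index 0; this is a reference sketch, not taken from any of the examples below):
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    VideoCapture capture(0);            // open the default camera
    if (!capture.isOpened())
        return 1;                       // device unavailable
    Mat frame;
    while (capture.read(frame))         // read() returns false when no frame can be grabbed
    {
        imshow("frame", frame);
        if (waitKey(30) == 27)          // Esc quits
            break;
    }
    return 0;
}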
Example 1: main
int main(int argc, char * argv[])
{
//init UART
senderInit();
VideoCapture capture;
capture.open(0);
// Init camera
if (!capture.isOpened())
{
cout << "capture device failed to open!" << endl;
return 1;
}
namedWindow("CT", CV_WINDOW_NORMAL);
setWindowProperty("CT", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
// CT framework
CompressiveTracker ct;
capture.set(CV_CAP_PROP_FRAME_WIDTH, 320);
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
//box Init
box = Rect(130, 130, 60, 70);//Rect(x,y,width,height)
int x0=box.x;
int y0=box.y;
int dx=0,dy=0;
// Run-time
if(!open_error_flag)
write(fd,unlock, strlen(unlock));
while(capture.read(frame))
{
if(first_flag){
cvtColor(frame, first_frame, CV_BGR2GRAY); // VideoCapture delivers BGR frames, so BGR2GRAY is the correct code
first_flag=0;
}
skinmask.create(frame.rows, frame.cols, CV_8UC1); // Mat::create instead of cvCreateMat: no per-frame leak, no C/C++ API mixing
// get frame
cvtColor(frame, current_gray, CV_BGR2GRAY);
absdiff(first_frame,current_gray,fore_frame);
//imshow("foreFrame",fore_frame);
if(!gotHand){
gotHand=getHand();
if(gotHand){
ctInitFlag=1;
}
if ((c = waitKey(15)) == 27){
return 0;
}
continue;
}
// Process Frame
skintracker(frame,skinmask);
fore_frame = fore_frame.mul(skinmask);
// CT initialization
if(ctInitFlag){
ct.init(fore_frame, box);
ctInitFlag=0;
}
//imshow("fore&skinmasked", fore_frame);
ct.processFrame(fore_frame, box);
rectangle(frame, box, Scalar(rgb_b,rgb_g,rgb_r));
// Display
flip(frame, frame, 1);
imshow("CT", frame);
dx=x0-box.x;
dy=y0-box.y;
getGasValue(dy);
getDirValue(dx);
calControlStr(gasValue,dirValue);
sendControlStr();
if ((c = waitKey(15)) == 27){
if(!open_error_flag)
write(fd,lock, strlen(lock));
sleep(1);
break;
}
}
return 0;
}
Example 2: RetrieveChessboardCorners
/// Calculates the corner pixel locations as detected by each camera
/// In: s: board settings, includes size, square size and the number of corners
/// inputCapture1: video capture for camera 1
/// inputCapture2: video capture for camera 2
/// iterations: number of chessboard images to take
/// Out: imagePoints1: pixel coordinates of chessboard corners for camera 1
/// imagePoints2: pixel coordinates of chessboard corners for camera 2
bool RetrieveChessboardCorners(vector<vector<Point2f> >& imagePoints1, vector<vector<Point2f> >& imagePoints2, BoardSettings s, VideoCapture inputCapture1,VideoCapture inputCapture2, int iterations)
{
destroyAllWindows();
Mat image1,image2;
vector<Point2f> pointBuffer1;
vector<Point2f> pointBuffer2;
clock_t prevTimeStamp = 0;
bool found1,found2;
int count = 0;
while (count != iterations){
char c = waitKey(15);
if (c == 's'){
cerr << "Calibration stopped" << endl;
return false;
}
// try find chessboard corners
else if(c == 'c'){
// ADAPTIVE_THRESH -> use adaptive thresholding to convert image to B&W
// FAST_CHECK -> Terminates call earlier if no chessboard in image
// NORMALIZE_IMAGE -> normalize image gamma before thresholding
// FILTER_QUADS -> uses additional criteria to filter out false quads
found1 = findChessboardCorners(image1, s.boardSize, pointBuffer1,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK |
CV_CALIB_CB_NORMALIZE_IMAGE | CV_CALIB_CB_FILTER_QUADS);
found2 = findChessboardCorners(image2, s.boardSize, pointBuffer2,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK |
CV_CALIB_CB_NORMALIZE_IMAGE | CV_CALIB_CB_FILTER_QUADS);
if (found1 && found2 && (pointBuffer1.size() >= s.cornerNum) && (pointBuffer2.size() >= s.cornerNum)){
// if time delay passed refine accuracy and store
if ((clock() - prevTimeStamp) > CAPTURE_DELAY * 1e-3*CLOCKS_PER_SEC){
Mat imageGray1, imageGray2;
cvtColor(image1, imageGray1, COLOR_BGR2GRAY);
cvtColor(image2, imageGray2, COLOR_BGR2GRAY);
// refines corner locations
// Size(5,5) -> half the side length of the search window (matching the calls below)
// Size(-1,-1) -> indicates no dead zone in the search area
// TermCriteria -> at most 1000 iterations, to reach an accuracy of 0.01
cornerSubPix(imageGray1, pointBuffer1, Size(5,5), Size(-1, -1),
TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01));
cornerSubPix(imageGray2, pointBuffer2, Size(5,5), Size(-1, -1),
TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01));
drawChessboardCorners(image1, s.boardSize, Mat(pointBuffer1), found1);
imshow("Image View1", image1);
drawChessboardCorners(image2, s.boardSize, Mat(pointBuffer2), found2);
imshow("Image View2", image2);
// user verifies the correct corners have been found
c = waitKey(0);
if (c == 's'){
return false;
}
if (c == 'y'){
// store the points and store time stamp
imagePoints1.push_back(pointBuffer1);
imagePoints2.push_back(pointBuffer2);
prevTimeStamp = clock();
count++;
cerr << "Count: " << count << endl;
}
}
}
}
inputCapture1.read(image1);
inputCapture2.read(image2);
imshow("Image View1", image1);
imshow("Image View2", image2);
}
// found all corners
return true;
}
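A natural next step, not shown in this example, is to feed the collected corner lists to cv::stereoCalibrate. Below is a hedged sketch; objectPoints (the physical chessboard corner grid), imageSize, and the initial camera matrices are assumed inputs that the caller would supply:
// Sketch only: consuming imagePoints1/imagePoints2 from RetrieveChessboardCorners.
// objectPoints, imageSize and the matrices below are assumptions, not part of the example above.
vector<vector<Point3f> > objectPoints;   // one grid of physical corner positions per accepted view
Mat cameraMatrix1 = Mat::eye(3, 3, CV_64F), distCoeffs1;
Mat cameraMatrix2 = Mat::eye(3, 3, CV_64F), distCoeffs2;
Mat R, T, E, F;
Size imageSize(640, 480);                // assumed capture resolution
double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                             cameraMatrix1, distCoeffs1,
                             cameraMatrix2, distCoeffs2,
                             imageSize, R, T, E, F);
cout << "Stereo calibration RMS error: " << rms << endl;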
Example 3: main
int main(int argc, char* argv[])
{
/* SERIAL CONFIGURATION */
struct termios tio;
struct termios stdio;
struct termios old_stdio;
int tty_usb;
char buffer [33];
/*
* CONFIGURE USB PORT
*/
configureOximeter(&stdio, &tio, &old_stdio);
/*
* OPENING THE USB PORT FOR I/O COMMUNICATION
*/
openUSB(&tty_usb, &tio);
/*
* READING FROM AND WRITING TO THE USB PORT
*
*/
/* END OF SERIAL CONFIGURATION */
//some boolean variables for different functionality within this
//program
bool trackObjects = true;
bool useMorphOps = true;
//Matrix to store each frame of the webcam feed
Mat cameraFeed;
//matrix storage for HSV image
Mat HSV;
//matrix storage for binary threshold image
Mat threshold;
//x and y values for the location of the object
int x=0, y=0;
//create slider bars for HSV filtering
createTrackbars();
//video capture object to acquire webcam feed
VideoCapture capture;
//open capture object at location zero (default location for webcam)
capture.open(0);
//set height and width of capture frame
capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
//start an infinite loop where webcam feed is copied to cameraFeed matrix
//all of our operations will be performed within this loop
while(1){
//store image to matrix
capture.read(cameraFeed);
//convert frame from BGR to HSV colorspace
cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
//filter HSV image between values and store filtered image to
//threshold matrix
inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
//perform morphological operations on thresholded image to eliminate noise
//and emphasize the filtered object(s)
if(useMorphOps)
morphOps(threshold);
//pass in thresholded frame to our object tracking function
//this function will return the x and y coordinates of the
//filtered object
if(trackObjects)
{
trackFilteredObject(x,y,threshold,cameraFeed);
write(tty_usb, &y, sizeof(y)); // write y to the serial port (sizeof(y) instead of a hard-coded 8, which over-ran the int)
//read(tty_usb, &leitura, 4); // read from the serial port
}
//display the frames
imshow(windowName2,threshold);
imshow(windowName,cameraFeed);
imshow(windowName1,HSV);
//delay so the screen can refresh.
//without this waitKey() call no image appears!
waitKey(200);
}
return 0;
}
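morphOps() is not included in the excerpt above; a plausible definition, following the common erode-then-dilate pattern for this kind of HSV tracker (the kernel sizes are illustrative assumptions), might look like:
// Hypothetical morphOps: erode to remove speckle noise, then dilate so the
// surviving blobs are easier to detect. Kernel sizes are illustrative.
void morphOps(Mat &thresh)
{
    Mat erodeElement  = getStructuringElement(MORPH_RECT, Size(3, 3));
    Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));
    erode(thresh, thresh, erodeElement);
    erode(thresh, thresh, erodeElement);
    dilate(thresh, thresh, dilateElement);
    dilate(thresh, thresh, dilateElement);
}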
Example 4: main
int main(int argc, char* argv[])
{
//some boolean variables for different functionality within this
//program
bool trackObjects = true;
bool useMorphOps = true;
calibrationMode = true;
//Matrix to store each frame of the webcam feed
Mat cameraFeed;
//matrix storage for HSV image
Mat HSV;
//matrix storage for binary threshold image
Mat threshold;
//x and y values for the location of the object
int x = 0, y = 0;
//video capture object to acquire webcam feed
VideoCapture capture;
//open capture object at location zero (default location for webcam)
capture.open(0);
//set height and width of capture frame
capture.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
capture.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);
//must create a window before setting mouse callback
cv::namedWindow(windowName);
capture.read(cameraFeed);
HSVMouseSelector hsvMouseSelector(&hsv, &cameraFeed);
//set mouse callback function to be active on "Webcam Feed" window
//we pass the handle to our "frame" matrix so that we can draw a rectangle to it
//as the user clicks and drags the mouse
setMouseCallback(windowName, ImageUtils::MouseCallback, &hsvMouseSelector);
//initiate mouse move and drag to false
//start an infinite loop where webcam feed is copied to cameraFeed matrix
//all of our operations will be performed within this loop
while (1){
//store image to matrix
capture.read(cameraFeed);
//convert frame from BGR to HSV colorspace
cvtColor(cameraFeed, HSV, COLOR_BGR2HSV);
//set HSV values from user selected region
hsvMouseSelector.UpdateFrame(&HSV);
//filter HSV image between values and store filtered image to
//threshold matrix
inRange(HSV, hsv.ToMin(), hsv.ToMax(), threshold);
//perform morphological operations on thresholded image to eliminate noise
//and emphasize the filtered object(s)
if (useMorphOps)
morphOps(threshold);
//pass in thresholded frame to our object tracking function
//this function will return the x and y coordinates of the
//filtered object
if (trackObjects)
trackFilteredObject(x, y, threshold, cameraFeed);
//show frames
if (calibrationMode == true){
//create slider bars for HSV filtering
createTrackbars();
imshow(windowName1, HSV);
imshow(windowName2, threshold);
}
else{
destroyWindow(windowName1);
destroyWindow(windowName2);
destroyWindow(trackbarWindowName);
}
imshow(windowName, cameraFeed);
//delay 30ms so that screen can refresh.
//image will not appear without this waitKey() command
//also use waitKey command to capture keyboard input
switch (waitKey(30)) {
case 99:
calibrationMode = !calibrationMode;//if user presses 'c', toggle calibration mode
break;
case 27:
return 0;
}
}
return 0;
}
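trackFilteredObject() is likewise defined outside the excerpt; here is a minimal sketch that recovers the object centre from image moments (the contour filtering and the drawing of the marker are assumptions):
// Hypothetical trackFilteredObject: locate the largest blob via image moments.
void trackFilteredObject(int &x, int &y, Mat threshold, Mat &cameraFeed)
{
    Mat temp;
    threshold.copyTo(temp);              // findContours modifies its input
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(temp, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    double maxArea = 0;
    for (size_t i = 0; i < contours.size(); i++)
    {
        Moments m = moments(contours[i]);
        if (m.m00 > maxArea)             // keep the largest blob
        {
            x = (int)(m.m10 / m.m00);    // centroid from first-order moments
            y = (int)(m.m01 / m.m00);
            maxArea = m.m00;
        }
    }
    if (maxArea > 0)
        circle(cameraFeed, Point(x, y), 10, Scalar(0, 255, 0), 2);
}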
Example 5: main
//......... part of the code omitted here .........
initImage(&d_I, WIDTH, HEIGHT, &pitch);
initMask(&d_mask, h_mask);
// Events allow asynchronous, nonblocking launch of subsequent kernels after a given event has happened,
// such as completion of a different kernel on a different stream.
cudaEvent_t latchFinished;
cudaEventCreate(&latchFinished);
// You should create a new stream for each bitMatcher kernel you want to launch at once.
cudaStream_t streanumKP1, streanumKP2;
cudaStreamCreate(&streanumKP1);
cudaStreamCreate(&streanumKP2);
FAST(img1g, keypoints1, threshold);
extractions += keypoints1.size();
latch( img1g, d_I, pitch, h_K1, d_D1, &numKP1, maxKP, d_K, &keypoints1, d_mask, latchFinished );
FAST(img2g, keypoints2, threshold); // This call to fast is concurrent with above execution.
extractions += keypoints2.size();
latch( img2g, d_I, pitch, h_K2, d_D2, &numKP2, maxKP, d_K, &keypoints2, d_mask, latchFinished );
bitMatcher( d_D1, d_D2, numKP1, numKP2, maxKP, d_M1, matchThreshold, streanumKP1, latchFinished );
bitMatcher( d_D2, d_D1, numKP2, numKP1, maxKP, d_M2, matchThreshold, streanumKP2, latchFinished );
timer = clock();
getMatches(maxKP, h_M1, d_M1);
getMatches(maxKP, h_M2, d_M2);
for (int i=0; i<numKP1; i++) {
if (h_M1[i] >= 0 && h_M1[i] < numKP2 && h_M2[h_M1[i]] == i) {
goodMatches.push_back( DMatch(i, h_M1[i], 0)); // For drawing.
p1.push_back(keypoints1[i].pt); // For recovering pose.
p2.push_back(keypoints2[h_M1[i]].pt);
}
}
img1.copyTo(img0);
img2.copyTo(img1);
cap.read(img2);
cvtColor(img2, img2g, CV_BGR2GRAY);
keypoints0 = keypoints1;
keypoints1 = keypoints2;
uIntSwapPointer = d_D1;
d_D1 = d_D2;
d_D2 = uIntSwapPointer;
numKP0 = numKP1;
numKP1 = numKP2;
FAST(img2g, keypoints2, threshold);
int loopIteration = 0;
for (; loopIteration < maxLoops || maxLoops == -1; loopIteration++) { // Main Loop.
{ // GPU code for descriptors and matching.
cudaEventRecord(start, 0);
extractions += keypoints2.size();
latch( img2g, d_I, pitch, h_K2, d_D2, &numKP2, maxKP, d_K, &keypoints2, d_mask, latchFinished);
bitMatcher( d_D1, d_D2, numKP1, numKP2, maxKP, d_M1, matchThreshold, streanumKP1, latchFinished );
bitMatcher( d_D2, d_D1, numKP2, numKP1, maxKP, d_M2, matchThreshold, streanumKP2, latchFinished );
cudaEventRecord(stop, 0);
}
timer = clock();
{ // Put as much CPU code here as possible.
{ // Display matches and/or video to user.
bool needToDraw = false;
if (showMatches && loopIteration % showMatchesInterval == 0) { // Draw matches.
drawMatches( img0, keypoints0, img1, keypoints1,
goodMatches, imgMatches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
imshow( "Matches", imgMatches );
Example 6: main
int main(){
//some boolean variables for added functionality
bool objectDetected = false;
//these two can be toggled by pressing 'd' or 't'
bool debugMode = false;
bool trackingEnabled = true;
//pause and resume code
bool pause = false;
//set up the matrices that we will need
//the two frames we will be comparing
Mat frame1,frame2;
//their grayscale images (needed for absdiff() function)
Mat grayImage1,grayImage2;
//resulting difference image
Mat differenceImage;
//thresholded difference image (for use in findContours() function)
Mat thresholdImage;
//video capture object.
VideoCapture capture;
for(int control=0; control <= 3; control++){
//we can loop the video by re-opening the capture every time the video reaches its last frame
capture.open("test2.mp4");
//capture.open(0);
if(!capture.isOpened()){
cout<<"ERROR ACQUIRING VIDEO FEED\n";
getchar();
return -1;
}
//check if the video has reached its last frame.
//we subtract 2 because we read two frames from the video at a time.
//if this is not included, we get a memory error!
while(//1
capture.get(CV_CAP_PROP_POS_FRAMES)<capture.get(CV_CAP_PROP_FRAME_COUNT)-2
){
//read first frame
capture.read(frame1);
//convert frame1 to gray scale for frame differencing
cv::cvtColor(frame1,grayImage1,COLOR_BGR2GRAY);
//copy second frame
capture.read(frame2);
//convert frame2 to gray scale for frame differencing
cv::cvtColor(frame2,grayImage2,COLOR_BGR2GRAY);
//perform frame differencing with the sequential images. This will output an "intensity image"
//do not confuse this with a threshold image, we will need to perform thresholding afterwards.
cv::absdiff(grayImage1,grayImage2,differenceImage);
//threshold intensity image at a given sensitivity value
cv::threshold(differenceImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
if(debugMode==true){
//show the difference image and threshold image
cv::imshow("Difference Image",differenceImage);
cv::imshow("Threshold Image", thresholdImage);
}else{
//if not in debug mode, destroy the windows so we don't see them anymore
cv::destroyWindow("Difference Image");
cv::destroyWindow("Threshold Image");
}
//blur the image to get rid of the noise. This will output an intensity image
cv::blur(thresholdImage,thresholdImage,cv::Size(BLUR_SIZE,BLUR_SIZE));
//threshold again to obtain binary image from blur output
cv::threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
if(debugMode==true){
//show the threshold image after it's been "blurred"
imshow("Final Threshold Image",thresholdImage);
}
else {
//if not in debug mode, destroy the windows so we don't see them anymore
cv::destroyWindow("Final Threshold Image");
}
//if tracking enabled, search for contours in our thresholded image
if(trackingEnabled){
searchForMovement(thresholdImage,frame1);
}
//show our captured frame
imshow("Frame1",frame1);
//check to see if a button has been pressed.
//this 10ms delay is necessary for proper operation of this program
//if removed, frames will not have enough time to referesh and a blank
//image will appear.
switch(waitKey(10)){
case 27: //'esc' key has been pressed, exit program.
return 0;
case 116: //'t' has been pressed. this will toggle tracking
trackingEnabled = !trackingEnabled;
if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
else cout<<"Tracking enabled."<<endl;
break;
case 100: //'d' has been pressed. this will toggle debug mode
//......... part of the code omitted here .........
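searchForMovement() sits in the omitted part of this example; a plausible sketch that bounds the largest moving contour in the motion mask (the drawing style is an assumption):
// Hypothetical searchForMovement: draw a bounding box around the largest
// contour found in the thresholded motion image.
void searchForMovement(Mat thresholdImage, Mat &cameraFeed)
{
    Mat temp;
    thresholdImage.copyTo(temp);         // findContours modifies its input
    vector<vector<Point> > contours;
    findContours(temp, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    if (contours.empty())
        return;
    // assume the largest contour is the moving object
    size_t largest = 0;
    for (size_t i = 1; i < contours.size(); i++)
        if (contourArea(contours[i]) > contourArea(contours[largest]))
            largest = i;
    rectangle(cameraFeed, boundingRect(contours[largest]), Scalar(0, 255, 0), 2);
}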
Example 7: main
int main(int argc, const char* argv[])
{
const char* keys =
"{ h help | | print help message }"
"{ l left | | specify left image }"
"{ r right | | specify right image }"
"{ c camera | 0 | enable camera capturing }"
"{ v video | | use video as input }"
"{ o output | pyrlk_output.jpg| specify output save path when input is images }"
"{ points | 1000 | specify points count [GoodFeatureToTrack] }"
"{ min_dist | 0 | specify minimal distance between points [GoodFeatureToTrack] }"
"{ m cpu_mode | false | run without OpenCL }";
CommandLineParser cmd(argc, argv, keys);
if (cmd.has("help"))
{
cout << "Usage: pyrlk_optical_flow [options]" << endl;
cout << "Available options:" << endl;
cmd.printMessage();
return EXIT_SUCCESS;
}
bool defaultPicturesFail = true;
string fname0 = samples::findFile(cmd.get<string>("left"));
string fname1 = samples::findFile(cmd.get<string>("right"));
string vdofile = cmd.get<string>("video");
string outfile = cmd.get<string>("output");
int points = cmd.get<int>("points");
double minDist = cmd.get<double>("min_dist");
int inputName = cmd.get<int>("c");
UMat frame0;
imread(fname0, IMREAD_GRAYSCALE).copyTo(frame0);
UMat frame1;
imread(fname1, IMREAD_GRAYSCALE).copyTo(frame1);
vector<cv::Point2f> pts(points);
vector<cv::Point2f> nextPts(points);
vector<unsigned char> status(points);
vector<float> err;
cout << "Points count : " << points << endl << endl;
if (frame0.empty() || frame1.empty())
{
VideoCapture capture;
UMat frame, frameCopy;
UMat frame0Gray, frame1Gray;
UMat ptr0, ptr1;
if(vdofile.empty())
capture.open( inputName );
else
capture.open(vdofile.c_str());
int c = inputName ;
if(!capture.isOpened())
{
if(vdofile.empty())
cout << "Capture from CAM " << c << " didn't work" << endl;
else
cout << "Capture from file " << vdofile << " failed" <<endl;
if (defaultPicturesFail)
return EXIT_FAILURE;
goto nocamera;
}
cout << "In capture ..." << endl;
for(int i = 0;; i++)
{
if( !capture.read(frame) )
break;
if (i == 0)
{
frame.copyTo( frame0 );
cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
}
else
{
if (i%2 == 1)
{
frame.copyTo(frame1);
cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);
ptr0 = frame0Gray;
ptr1 = frame1Gray;
}
else
{
frame.copyTo(frame0);
cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
ptr0 = frame1Gray;
ptr1 = frame0Gray;
}
pts.clear();
goodFeaturesToTrack(ptr0, pts, points, 0.01, minDist); // use the parsed min_dist option instead of a hard-coded 0.0
if(pts.size() == 0)
//......... part of the code omitted here .........
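The excerpt breaks off just before the optical-flow call itself; the step it builds toward would presumably be a plain pyramidal Lucas-Kanade call, reusing the vectors declared earlier (a sketch, not part of the original):
// Presumed continuation: track the detected corners from ptr0 into ptr1.
calcOpticalFlowPyrLK(ptr0, ptr1, pts, nextPts, status, err,
                     Size(21, 21),  // per-level search window (illustrative choice)
                     3);            // number of pyramid levels
// nextPts[i] is the new position of pts[i] wherever status[i] != 0.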
Example 8: main
/**
* @main function
**/
int main(int argc,char const *argv[])
{
if (argc != 5)
{
printf("Invalid argumen!\n");
printf("-- LightMusic <camera_number> <buffer_length> <low_freq> <hi_freq>\n");
printf("-- Press Esc to exit\n");
printf("ex : LightMusic 1 5620 261 1760\n");
printf("-- <camera_number> : device number of camera (from 1 to 99)\n");
printf("-- <buffer_lenght> : buffer lenght used (from 1000 to 20000)\n");
printf("-- <low_freq> : freq of lowest tone, low 261, mid 523, hi 1046\n");
printf("-- <hi_freq> : freq of highest tone, low 493, mid 987, hi 1760\n");
printf("CAUTION!!\n");
printf("-- bigger number of buffer length, slower frame scan run\n");
printf("-- smaller number of buffer length, bigger playback sound glitch occur\n");
printf("-- find right number of buffer length depending on your hardware\n");
printf("LightMusic -- developed by Lonehack\n");
return 0;
}
int cam = atoi(argv[1]);
BUFFER_LEN = atoi(argv[2]);
lotone = atoi(argv[3]);
hitone = atoi(argv[4]);
//-- Video prepare
VideoCapture capture;
// NOTE: the frame size is configured right after capture.open() below;
// calling set() on a capture that is not yet open has no effect.
Mat frame;
time_t start, finish;
//-- Sound error handling
if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, 0)) < 0)
{
printf(" --(!) Playback open error: %s --\n", snd_strerror(err));
exit(EXIT_FAILURE);
}
if ((err = snd_pcm_set_params(handle,
SND_PCM_FORMAT_FLOAT,
SND_PCM_ACCESS_RW_INTERLEAVED,
1,
44100, //sample rate, standard 44100
1,
80200)) < 0) //latency, standard 2x the sample rate
{
printf(" --(!) Playback open error: %s --\n", snd_strerror(err));
exit(EXIT_FAILURE);
}
//-- Opening video stream
// for (int cam=1;cam<100;cam++)
// {
// capture.open( cam ); //-- opening input : ( -1 ) any camera or camera number (1,...,99), ( argv[1] ) video file
// }
capture.open( cam ); //-- opening input : ( -1 ) any camera or camera number (1,...,99), ( argv[1] ) video file
capture.set(CV_CAP_PROP_FRAME_WIDTH, 640); //-- applied after open(); before open() these calls were ignored
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 360);
//-- Checking interface
if ( ! capture.isOpened() )
{
printf("--(!)Error opening video capture --\n");
return -1;
}
//-- Start the clock
time(&start);
int counter=0;
//-- Read captured
while ( capture.read(frame) )
{
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!\n");
break;
}
//-- fix image resolution
resize(frame, frame, Size(640, 360), 0, 0, INTER_CUBIC);
//-- Show original frame
//namedWindow(window_name_0,CV_WINDOW_NORMAL|CV_WINDOW_KEEPRATIO);
//imshow( window_name_0,frame );
//-- flip frame
flip(frame, frame, -1);
//-- Apply the lightDetect
lightDetect(frame);
//printf("X = %d, Y = %d, Inten = %d \n", PosX, PosY, inten_frame);
//-- apply sound parameter
SineWave(PosX, PosY);
//Stop the clock and show FPS
time(&finish);
counter++;
//......... part of the code omitted here .........
Example 9: loop
static void loop() {
stream1.read(frame);
//resize(frame, frame, Size(frame.cols*0.8, frame.rows*0.8));
cvtColor(frame, grey, COLOR_BGR2GRAY);
if (started) {
cv::Mat rvec(3, 1, cv::DataType<double>::type);
cv::Mat tvec(3, 1, cv::DataType<double>::type);
float scale = 0;
detector->detect(grey, kpts2);
cv::KeyPointsFilter::retainBest(kpts2, MAX_FEATURES);
descriptor->compute(grey, kpts2, desc2);
frames++;
if (desc2.cols > 5 && frames > 0) {
frames = 0;
matcher.match(desc1, desc2, matches);
if (matches.size() > 5) {
double max_dist = 0; double min_dist = 1000;
std::vector< DMatch > good_matches;
std::vector<KeyPoint> matched1, matched2;
for (int i = 0; i < matches.size(); i++) {
if (matches[i].distance < 20) {
good_matches.push_back(matches[i]);
matched1.push_back(kpts1[matches[i].queryIdx]);
matched2.push_back(kpts2[matches[i].trainIdx]);
}
}
KeyPoint::convert(matched1, init);
KeyPoint::convert(matched2, points2);
float avg_dist = 0;
for (size_t i = 0; i < good_matches.size(); i++) {
double dist = norm(init[i] - points2[i]);
avg_dist += dist;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
avg_dist = avg_dist / good_matches.size();
int k = 0;
for (int i = 0; i < init.size(); i++) {
double dist = norm(init[i] - points2[i]);
//printf("%f\n", dist);
if (dist > avg_dist){
continue;
}
points2[k] = points2[i];
init[k] = init[i];
k++;
}
points2.resize(k);
init.resize(k);
if (good_matches.size() > 10 && init.size() > 6) {
float f = K.at<double>(0, 0);
Point2f pp(K.at<double>(0, 2), K.at<double>(1, 2));
E = findEssentialMat(init, points2, f, pp, RANSAC, 0.999, 1.0, mask);
int inliers = recoverPose(E, init, points2, R, T, f, pp, mask);
if (inliers > 10){
printf("%d\n", inliers);
hconcat(R, T, M1);
cv::Mat row = cv::Mat::zeros(1, 4, CV_64F);
row.at<double>(0, 3) = 1;
M1.push_back(row);
//print(M1);
totalT = totalT*M1;
Mat rot;
totalT(cv::Range(0, 3), cv::Range(0, 3)).copyTo(rot);
Mat rotv;
Rodrigues(rot, rotv);
poseplot(Range(0, 100), Range(0, 300)) = 0;
char buff1[50];
int fontFace = QT_FONT_NORMAL;
double fontScale = 0.5f;
int thickness = 1;
sprintf(buff1, "x:%+.1f y:%+.1f z:%+.1f", rotv.at<double>(0, 0) * (180 / CV_PI),
(rotv.at<double>(1, 0) / CV_PI) * 180, (rotv.at<double>(2, 0) / CV_PI) * 180);
string text(buff1);
putText(poseplot, text, Point(0, 20), fontFace, fontScale, Scalar::all(255), thickness, 8);
circle(poseplot, Point(100 + totalT.at<double>(0, 3) * 3, 100 + totalT.at<double>(1, 3) * 3), 2, Scalar(0, 255, 0)); // scale factor moved inside Point(): the old ")*3" multiplied the 100-pixel offset too
}
kpts1.clear();
for (int i = 0; i < kpts2.size(); i++) {
kpts1.push_back(kpts2[i]);
}
desc2.copyTo(desc1);
}
}
}
}
if (mask.rows > 0) {
for (size_t i = 0; i < min(init.size(), points2.size()); i++) {
circle(frame, init[i], 2, Scalar(0, 255, 0));
if ((int)mask.at<uchar>(i, 0)) {
line(frame, init[i], points2[i], Scalar(0, 255, 0));
}
else{
//......... part of the code omitted here .........
Example 10: main
int main(int argc, char** argv)
{
CV_TRACE_FUNCTION();
cv::CommandLineParser parser(argc, argv,
"{help h ? | | help message}"
"{n | 100 | number of frames to process }"
"{@video | 0 | video filename or cameraID }"
);
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
VideoCapture capture;
std::string video = parser.get<string>("@video");
if (video.size() == 1 && isdigit(video[0]))
capture.open(parser.get<int>("@video"));
else
capture.open(video);
int nframes = 0;
if (capture.isOpened())
{
nframes = (int)capture.get(CAP_PROP_FRAME_COUNT);
cout << "Video " << video <<
": width=" << capture.get(CAP_PROP_FRAME_WIDTH) <<
", height=" << capture.get(CAP_PROP_FRAME_HEIGHT) <<
", nframes=" << nframes << endl;
}
else
{
cout << "Could not initialize video capturing...\n";
return -1;
}
int N = parser.get<int>("n");
if (nframes > 0 && N > nframes)
N = nframes;
cout << "Start processing..." << endl
<< "Press ESC key to terminate" << endl;
UMat frame;
for (int i = 0; N > 0 ? (i < N) : true; i++)
{
CV_TRACE_REGION("FRAME"); // OpenCV Trace macro for named "scope" region
{
CV_TRACE_REGION("read");
capture.read(frame);
if (frame.empty())
{
cerr << "Can't capture frame: " << i << std::endl;
break;
}
// OpenCV Trace macro for NEXT named region in the same C++ scope
// Previous "read" region will be marked complete on this line.
// Use this to eliminate unnecessary curly braces.
CV_TRACE_REGION_NEXT("process");
process_frame(frame);
CV_TRACE_REGION_NEXT("delay");
if (waitKey(1) == 27/*ESC*/)
break;
}
}
return 0;
}
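process_frame() lives elsewhere in this sample; a stand-in sketch follows (the blur/edge choice is an assumption — any per-frame workload demonstrates the tracing equally well):
// Hypothetical process_frame: the sample's point is the CV_TRACE instrumentation,
// so the actual per-frame work is interchangeable.
static void process_frame(const UMat& frame)
{
    CV_TRACE_FUNCTION();               // trace this function as its own region
    UMat gray, edges;
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    GaussianBlur(gray, gray, Size(5, 5), 1.5);
    Canny(gray, edges, 32, 64);
    imshow("edges", edges);
}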
Example 11: loop
void loop() {
/// Add coordinate axes
myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem());
// Read camera frame
stream1.read(frame);
//resize(frame, frame, Size(frame.cols*0.8, frame.rows*0.8));
cvtColor(frame, grey, COLOR_BGR2GRAY);
// move to function?
if (!points1.empty()) {
calcOpticalFlowPyrLK(prevGray, grey, points1, points2, status, err, winSize, 3, termcrit, 0, 0.001);
// remove bad tracks
size_t k;
for (size_t i = k = 0; i < points2.size(); i++) {
if (!status[i])
continue;
points2[k] = points2[i];
points1[k] = points1[i];
init[k] = init[i];
if(rdpoints){
init3dpoints[k] = init3dpoints[i];
}
k++;
circle(frame, points2[i], 2, Scalar(0, 255, 0), -1, 8);
if (!rdpoints){
line(frame, init[i], points2[i], Scalar(0, 255, 0));
}
}
points1.resize(k);
points2.resize(k);
init.resize(k);
if (rdpoints) {
init3dpoints.resize(k);
}
}
if (points1.size() > 8) {
totalT = totalT + T;
cv::Mat rvec(3, 1, cv::DataType<double>::type);
cv::Mat tvec(3, 1, cv::DataType<double>::type);
float scale = 0;
if (init3dpoints.size() > 0) {
solvePnPRansac(init3dpoints, points2, K, noArray(), rvec, tvec, false, 200,4);
T = tvec;
Rodrigues(rvec, R);
/*frames++;
T = T + tvec;
if (frames == 3) {
T = T / 3;
circle(poseplot, Point(200 + T.at<double>(0, 0) * 100, 200 + T.at<double>(1, 0) * 100), 2, Scalar(0, 255, 0));
T = Mat::zeros(3, 1, CV_64F);
frames = 0;
}*/
}
}
imshow("cam", frame);
int key = waitKey(15);
if (key == ' ') {
if (started && !rdpoints) {
rdpoints = 1;
float f = K.at<double>(0, 0);
Point2f pp(K.at<double>(0, 2), K.at<double>(1, 2));
Mat E = findEssentialMat(init, points2, f, pp, RANSAC, 0.99, 1.0, mask);
int inliers = recoverPose(E, init, points2, R, T, f, pp);
hconcat(R, T, M1);
triangulate_points(K*M0, K*M1, init, points2, &init3dpoints);
c3dpoints.clear();
for (int i = 0; i < init3dpoints.size(); i++) {
c3dpoints.push_back(init3dpoints[i]/10);
}
}
}
std::swap(points2, points1);
cv::swap(prevGray, grey);
if (key == ' ' && !rdpoints) {
started = 1;
// features and keypoints for object
img1 = grey.clone();
keyframes.push_back(img1);
kpts1.clear();
init.clear();
goodFeaturesToTrack(img1, points1, MAX_FEATURES, 0.01, 15, Mat(), 3, 0, 0.04);
//cornerSubPix(img1, points1, subPixWinSize, Size(-1, -1), termcrit);
for (size_t i = 0; i < points1.size(); i++) {
kpts1.push_back(cv::KeyPoint(points1[i], 1.f));
init.push_back(Point2f(points1[i]));
}
}
else if (key == 'q') {
//......... part of the code omitted here .........
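triangulate_points() in Example 11 is a custom helper; a minimal sketch built on cv::triangulatePoints (the Point3f output convention matches how init3dpoints is used, but the exact signature is an assumption):
// Hypothetical triangulate_points: wrap cv::triangulatePoints and convert the
// homogeneous 4xN result back to euclidean 3D points.
void triangulate_points(const Mat& P0, const Mat& P1,
                        const vector<Point2f>& pts0, const vector<Point2f>& pts1,
                        vector<Point3f>* out)
{
    Mat points4D;
    triangulatePoints(P0, P1, pts0, pts1, points4D); // 4xN homogeneous coordinates
    points4D.convertTo(points4D, CV_32F);            // normalise the type before dividing
    out->clear();
    for (int i = 0; i < points4D.cols; i++)
    {
        float w = points4D.at<float>(3, i);          // dehomogenise each column
        out->push_back(Point3f(points4D.at<float>(0, i) / w,
                               points4D.at<float>(1, i) / w,
                               points4D.at<float>(2, i) / w));
    }
}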
Example 12: main
int main(int argc, char * argv[]){
VideoCapture capture;
FileStorage fs;
FileStorage detector_file;
bool fromfile=false;
//Read options
CommandLineParser parser(argc, argv, keys);
int init_frame = parser.get<int>("i");
string param_file = parser.get<string>("p");
string video = parser.get<string>("s");
string init_bb = parser.get<string>("b");
fs.open(param_file, FileStorage::READ);
if (video != "null"){
fromfile=true;
capture.open(video);
}else{
fromfile=false;
capture.open(0);
}
if (init_bb !="null"){
readBB(init_bb.c_str());
gotBB =true;
}
//Init camera
if (!capture.isOpened()){
cout << "capture device failed to open!" << endl;
return 1;
}
//Register mouse callback to draw the bounding box
cvNamedWindow("Tracker",CV_WINDOW_AUTOSIZE);
cvNamedWindow("Features",CV_WINDOW_AUTOSIZE);
cvSetMouseCallback( "Tracker", mouseHandler, NULL );
FILE *bb_file = fopen("bounding_boxes.txt","w");
Mat frame;
Mat last_gray;
Mat first;
if (fromfile){
capture.set(CV_CAP_PROP_POS_FRAMES,init_frame);
capture.read(frame);
last_gray.create(frame.rows,frame.cols,CV_8U);
cvtColor(frame, last_gray, CV_BGR2GRAY);
frame.copyTo(first);
}
///Initialization
GETBOUNDINGBOX:
while(!gotBB){
if (!fromfile) capture.read(frame);
else first.copyTo(frame);
cvtColor(frame, last_gray, CV_BGR2GRAY);
drawBox(frame,box);
imshow("Tracker", frame);
if (cvWaitKey(30) == 'q')
return 0;
}
if (min(box.width,box.height)<(int)fs.getFirstTopLevelNode()["min_win"]){
cout << "Bounding box too small, try again." << endl;
gotBB = false;
goto GETBOUNDINGBOX;
}
drawBox(frame,box);
imshow("Tracker", frame);
//Remove callback
cvSetMouseCallback( "Tracker", NULL, NULL );
printf("Initial Bounding Box = x:%d y:%d h:%d w:%d\n",box.x,box.y,box.width,box.height);
//Output file
fprintf(bb_file,"%d,%d,%d,%d,%f\n",box.x,box.y,box.br().x,box.br().y,1.0);
INIT:
// Framework
Alien tracker(fs.getFirstTopLevelNode());
tracker.init(last_gray,box);
cvWaitKey();
///Run-time
Mat current_gray;
RotatedRect pbox;
bool status=true;
int frames = 1;
int detections = 1;
float conf;
while(capture.read(frame)){
cvtColor(frame, current_gray, CV_BGR2GRAY);
cout << endl;
//Process Frame
double t=(double)getTickCount();
conf = tracker.processFrame(last_gray,current_gray,pbox,status);
t = ((double)getTickCount() - t)*1000/getTickFrequency();
//Draw Box
if (status){
drawBox(frame,pbox);
fprintf(bb_file,"%f,%f,%f,%f,%f,%f,%f\n",pbox.center.x, pbox.center.y, pbox.size.height,pbox.size.width, pbox.angle,conf,t);
detections++;
}
else{
fprintf(bb_file,"NaN,NaN,NaN,NaN,%f,%f\n",conf,t);
//......... part of the code omitted here .........
Example 13: TFind
CPoint TFind()
{ char op[255];
// cout<<"NO CALL?";
// cout<<"??"<<endl;
CPoint cvoid = {-1, -1};
if(!cap->isOpened()){cout<<"ERROR: no camera";return cvoid;}
Mat mat,mat2;
// cout << "Pre Cap";
// cout << endl;
{
AGAIN:
cap->read(mat);
cvtColor(mat, mat2, CV_BGR2HLS);
static int PK=0;
sprintf(op,"_%d.bmp",PK++);
imwrite(op,mat2);
// (*cap) >> mat2;
// cout << "Post Cap";
// cout << "FAIL_CONVERT?";
Mat_ <Vec3b> Frame(mat);
Mat_ <Vec3b> OFrame(mat2);
cv::Size sks = Frame.size();
int i,j;
int SX,SY,ct;
SX=0;SY=0;ct=0;
int FW = 5;
for(i=FW;i<sks.height-FW;i++)
for(j=FW;j<sks.width-FW;j++)
{
/*
int a=(OFrame(i-FW,j)[0] + OFrame(i,j+FW)[0] +
OFrame(i+FW,j)[0] + OFrame(i,j-FW)[0]-4*OFrame(i,j)[0]);
if(a<0)a+=256;
Frame(i,j)[0]=a;
Frame(i,j)[1]=OFrame(i,j)[1];//(OFrame(i-FW,j)[1] + OFrame(i,j+FW)[1] +
//OFrame(i+FW,j)[1] + OFrame(i,j-FW)[1]-4*OFrame(i,j)[1]);
Frame(i,j)[2]=OFrame(i,j)[2];//(OFrame(i-FW,j)[2] + OFrame(i,j-FW)[2] +
*/
//OFrame(i+FW,j)[2] + OFrame(i,j+FW)[2]-4*OFrame(i,j)[2]);
if((OFrame(i,j)[0] < 21 && OFrame(i,j)[0] > 14))// && OFrame(i,j)[0] < 20 )
{ //the value is red hue (20 > h | h > 240)
if(OFrame(i,j)[1] > 70 && OFrame(i,j)[2] > 165)// && OFrame(i,j)[1] > 65 && OFrame(i,j)[2] > 170 && OFrame(i,j)[2] < 240) //vary the luminance values. Black and White can be red with Very Low or Very High luminance
//off-white can be red with low saturation and high luminance. something similar for black
{
SX+=j;
SY+=i;
ct++;
OFrame(i,j)[0]=90;
OFrame(i,j)[1]=140;
OFrame(i,j)[2]=255;
}
}
else
{
//OFrame(i,j)[0]=255;
//OFrame(i,j)[1]=0;
//OFrame(i,j)[2]=0;
}
}
// cout << ct;
// cout << endl;
if(ct !=0){
SX = SX / ct;
SY = SY / ct;
}else{SX=SY=-1;}
// cout << "SEE SOMETHING?";
// cout << endl;
for(i=0;i<5;i++)
for(j=0;j<5;j++)
{ // braces added: all three channel writes belong to the loop body
OFrame(i,j)[0]=255;
OFrame(i,j)[1]=157;
OFrame(i,j)[2]=10;
}
cvtColor(mat2, mat, CV_HLS2BGR);
//if(SX > 4 && SX < 356 && SY > 4 && SY < 236)
//for(i=-3;i<=3;i++)
//for(j=-3;j<=3;j++)
//{OFrame(SY+i,SX+j)[0]=0; OFrame(SY+i,SX+j)[1]=255; OFrame(SY+i,SX+j)[2]=0;} //green (the gotton format is BGR here)
sprintf(op,"_%d-f.bmp",PK++);
imwrite(op,mat);
sleep(5);
goto AGAIN;
}
CPoint RT = cvoid; // initialized; note this point is unreachable anyway because of the AGAIN loop above
//RT.x=SX;
//RT.y=SY;
return RT;
}
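The per-pixel scan above can be written far more compactly with inRange() and moments(); here is a sketch of the equivalent centroid computation (the HLS bounds mirror the H/L/S thresholds tested in the loop):
// Equivalent of the manual scan: threshold in HLS, then take the mask centroid.
Mat hls, redMask;
cvtColor(mat, hls, CV_BGR2HLS);
inRange(hls, Scalar(15, 71, 166), Scalar(20, 255, 255), redMask); // H in (14,21), L > 70, S > 165
Moments m = moments(redMask, true);   // treat the mask as a binary image
if (m.m00 > 0)
{
    int cx = (int)(m.m10 / m.m00);    // equivalent of SX
    int cy = (int)(m.m01 / m.m00);    // equivalent of SY
}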
Example 14: main
int main( int argc, char** argv )
{
// Benchmark variables
//double filteringTime=0 , noiseTime=0, contourTime=0, readTime=0, waitFinal=0, totalTime, finalLoop;
Benchmark bench;
// Video vars
VideoCapture cap;
TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
Size subPixWinSize(10,10), winSize(31,31);
// Get the video (filename or device)
cap.open("IMG_6214.JPG");
if( !cap.isOpened() )
{
cout << "Could not initialize capturing...\n";
return 0;
}
// Display vars
int i = 0 ;
int n = cap.get(CV_CAP_PROP_FRAME_COUNT);
#ifdef DISPLAY
// Create the windows
namedWindow(WINDOW_ORIGIN) ;
namedWindow(WINDOW_THRESHOLD);
namedWindow(WINDOW_THRESHOLD_NOISE);
namedWindow(WINDOW_THRESHOLD_NOISE_BLUR);
namedWindow(WINDOW_CONFIG);
createTrackbar(TRACKBAR_HUE_MIN, WINDOW_CONFIG, &H_min, 255) ;
createTrackbar(TRACKBAR_HUE_MAX, WINDOW_CONFIG, &H_max, 255) ;
createTrackbar(TRACKBAR_SATURATION_MIN, WINDOW_CONFIG, &S_min, 255) ;
createTrackbar(TRACKBAR_SATURATION_MAX, WINDOW_CONFIG, &S_max, 255) ;
createTrackbar(TRACKBAR_VALUE_MIN, WINDOW_CONFIG, &V_min, 255) ;
createTrackbar(TRACKBAR_VALUE_MAX, WINDOW_CONFIG, &V_max, 255) ;
moveWindow(WINDOW_ORIGIN, 0, 0) ;
moveWindow(WINDOW_THRESHOLD, 0, 0);
moveWindow(WINDOW_THRESHOLD_NOISE, 0, 0);
moveWindow(WINDOW_THRESHOLD_NOISE_BLUR, 0, 0);
moveWindow(WINDOW_CONFIG, 0, 0);
namedWindow(WINDOW_TEST);
moveWindow(WINDOW_TEST, 0, 0);
#endif
#ifdef OCL
ocl::PlatformsInfo platforms;
ocl::getOpenCLPlatforms(platforms);
ocl::DevicesInfo devices;
ocl::getOpenCLDevices(devices);
std::cout << "platforms " << platforms.size() << " devices " << devices.size() << " " << devices[0]->deviceName << std::endl;
ocl::setDevice(devices[0]);
#endif
// thread declarations
pthread_t threadMaths, threadWall;
//pthread_create(&threadWall, NULL, mathsRoutine, arg)
gettimeofday(&bench.beginTime, NULL);
BallState ballState_t1, ballState_t2;
int numberOfTreatedLoop = 0, numberOfNonTreatedLoop = 0, noTreatment = 0;
for(i=0 ; i < n ; i++) // for loop because the input is a video; with a live stream this would likely be a while(1)
{
if(noTreatment <= 0)//the ball must be in a position worth analysing further
{
numberOfTreatedLoop++;
numberOfNonTreatedLoop = 0;
ballState_t1 = ballState_t2;
std::vector<CircleFound> circles;
CircleFound ballCircle;
findBall(cap, bench, circles);
ballCircle = getBestCircle(circles);
if(ballCircle.radius == 0) // means no circle found
continue; // skip this frame (the original 'break' was commented out, which accidentally made the next call conditional on radius == 0)
getBallPosition(ballCircle.x, ballCircle.y, ballCircle.radius*2, ballState_t2);
calculateBallSpeed(ballState_t2);
if(ballState_t2.vy < 0) // if the ball moves backwards, pause processing for a moment
{
noTreatment = 3;
}
copyStateToMaths(ballState_t1, ballState_t2, CI);
// Start maths part
if(ballState_t2.v)
pthread_create(&threadMaths, NULL, Pb_Inv, NULL);
#ifdef DEBUG
cout << "x : "<< ballState_t2.x << " y : " << ballState_t2.y << " z : " << ballState_t2.z <<endl; // ballState_t2 is the variable actually in scope here
#endif
#ifdef DISPLAY
#ifdef PICTURE
char c = (char)waitKey(100000);
#else
char c = (char)waitKey(1);
#endif
//......... part of the code omitted here .........
Example 15: grabFrame
bool grabFrame()
{
Mat cap, frame, red, red_lower, red_upper, yellow;
bool frame_available;
frame_available = camera.read(cap);
if(!frame_available)
{
cout << "Unable to grab frame from camera, might not be initialized or maybe unpluged?\n";
return 1;
}
cvtColor(cap, frame, CV_BGR2HSV); //convert from BGR (OpenCV's capture format) to HSV
inRange(frame, lowLowerRed, highLowerRed, red_lower);
inRange(frame, lowUpperRed, highUpperRed, red_upper);
inRange(frame, lowYellow, highYellow, yellow);
cleanThresholdedImage(red_lower);
cleanThresholdedImage(red_upper);
cleanThresholdedImage(yellow);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
vector<Rect> bounding_rects;
//find contours and bounding boxes for the yellow mask (the same steps are repeated for red below)
findContours(yellow, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
for( int i = 0; i < contours.size(); i++ )
{
bounding_rects.push_back( boundingRect( Mat(contours[i]) ) );
}
if(bounding_rects.size() > 0)
{
Rect largest = largestRectInFrame(bounding_rects);
rectangle( yellow, largest, Scalar(150, 127, 200), 1, 8);
if(largest.area() > AREA_THRESHOLD)
{
cout << "Yellow object center at: (" << (largest.x + largest.width/2)
<< ", " << (largest.y + largest.height/2) << ")" << endl;
yellow_object_seen = true;
}
else
{
cout << "There's somehting yellow there, but not big enough." << endl;
yellow_object_seen = false;
}
}
else
{
cout << "Nothing yellow, fam." << endl;
yellow_object_seen = false;
}
contours.clear();
hierarchy.clear();
bounding_rects.clear();
//same thing for the red
red = red_lower + red_upper;
findContours(red, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
for( int i = 0; i < contours.size(); i++ )
{
bounding_rects.push_back( boundingRect( Mat(contours[i]) ) );
}
if(bounding_rects.size() > 0)
{
Rect largest = largestRectInFrame(bounding_rects);
rectangle( red, largest, Scalar(150, 127, 200), 1, 8);
if(largest.area() > AREA_THRESHOLD)
{
cout << "Red object center at: (" << (largest.x + largest.width/2)
<< ", " << (largest.y + largest.height/2) << ")" << endl;
red_object_seen = true;
}
else
{
cout << "There's somehting red there, but not big enough." << endl;
red_object_seen = false;
}
}
else
{
cout << "No red, fam." << endl;
red_object_seen = false;
}
contours.clear();
hierarchy.clear();
bounding_rects.clear();
return 0;
}
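largestRectInFrame() and cleanThresholdedImage() are helpers outside this excerpt; plausible sketches consistent with how grabFrame uses them (the morphology kernel size is an assumption):
// Hypothetical helpers matching the calls in grabFrame above.
Rect largestRectInFrame(const vector<Rect>& rects)
{
    Rect largest = rects[0];             // the caller only calls this when rects is non-empty
    for (size_t i = 1; i < rects.size(); i++)
        if (rects[i].area() > largest.area())
            largest = rects[i];
    return largest;
}

void cleanThresholdedImage(Mat& mask)
{
    // open then close: drop speckle noise, then fill small holes in the blobs
    Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(5, 5));
    morphologyEx(mask, mask, MORPH_OPEN, kernel);
    morphologyEx(mask, mask, MORPH_CLOSE, kernel);
}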