This article collects typical usage examples of the C++ method VideoWriter::isOpened. If you have been wondering how exactly to use VideoWriter::isOpened in C++, the curated code samples below may help. You can also explore further usage examples of the VideoWriter class this method belongs to.
The following presents 15 code examples of the VideoWriter::isOpened method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
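Before the full examples, here is a minimal sketch of the basic pattern (a sketch assuming the OpenCV 2.x-style headers and constants that the examples below use): open the writer with a file name, codec, frame rate and frame size, then let isOpened() confirm that the backend accepted them before any frames are written.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    // file name, FOURCC codec, frames per second, frame size (width x height)
    VideoWriter writer("out.avi", CV_FOURCC('M','J','P','G'), 25.0, Size(640, 480), true);
    if (!writer.isOpened()) // the codec, path or parameters may have been rejected
    {
        cout << "ERROR: VideoWriter could not be opened" << endl;
        return -1;
    }
    // every frame written must match the Size given above (640 wide, 480 high)
    writer.write(Mat(480, 640, CV_8UC3, Scalar(0, 0, 255)));
    return 0;
}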
Example 1: main
int main(int argc, const char **argv)
{
// create an image (3 channels, 16-bit depth, 650 rows high,
// 600 columns wide, with (0, 50000, 50000) assigned to the
// Blue, Green and Red planes respectively)
Mat img(650, 600, CV_16UC3, Scalar(0, 50000, 50000));
if (img.empty())
{
cout << "ERROR : Image cannot be loaded..!!" << endl;
return -1;
}
// vector that stores the compression parameters of the image
vector<int> compression_params;
// specify the compression technique
compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
// specify the compression quality
compression_params.push_back(98);
// write the image to file
bool bSuccess = imwrite("./testImage.jpg", img, compression_params);
if (!bSuccess)
{
cout << "ERROR : Failed to save the image" << endl;
}
// create a window with the name "MyWindow"
namedWindow("MyWindow", CV_WINDOW_AUTOSIZE);
// display the image which is stored in the 'img' in the "MyWindow" window
imshow("MyWindow", img);
waitKey(0);
destroyWindow("MyWindow");
// write video to file
VideoCapture cap(0); // open the video camera no. 0
if (!cap.isOpened()) // if not success, exit program
{
cout << "ERROR: Cannot open the video file" << endl;
return -1;
}
namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
cout << "Frame Size = " << dWidth << "x" << dHeight << endl;
Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
VideoWriter oVideoWriter ("./MyVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object
if ( !oVideoWriter.isOpened() ) // if the VideoWriter could not be initialized, exit the program
{
cout << "ERROR: Failed to write the video" << endl;
return -1;
}
while (1)
{
Mat frame;
bool bSuccess = cap.read(frame); // read a new frame from video
if (!bSuccess) //if not success, break loop
{
cout << "ERROR: Cannot read a frame from video file" << endl;
break;
}
oVideoWriter.write(frame); // write the frame into the file
imshow("MyVideo", frame); //show the frame in "MyVideo" window
if (waitKey(10) == 27) // wait 10 ms for the 'esc' key; if it is pressed, break the loop
{
cout << "esc key is pressed by user" << endl;
break;
}
}
return 0;
}
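Example 1 uses the legacy C-style names (CV_FOURCC, CV_CAP_PROP_*, CV_WINDOW_AUTOSIZE). For comparison only, here is a sketch of the same writer setup under the OpenCV 3+ API; this is not part of the original example:
// OpenCV 3+ variant of the writer setup in Example 1 (a sketch)
VideoCapture cap(0);
Size frameSize((int)cap.get(CAP_PROP_FRAME_WIDTH),
               (int)cap.get(CAP_PROP_FRAME_HEIGHT));
// VideoWriter::fourcc replaces the CV_FOURCC macro in OpenCV 3 and later
VideoWriter oVideoWriter("./MyVideo.avi",
                         VideoWriter::fourcc('M','J','P','G'), 20, frameSize, true);
if (!oVideoWriter.isOpened())
    cout << "ERROR: Failed to open the video writer" << endl;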
Example 2: grabarVideo
void grabarVideo(Mat frame, VideoCapture cap)
{
static bool isRecording = false;
static VideoWriter writer;
static time_t vidDelta = 0;
int vidFps = 10;
int fourcc = CV_FOURCC(vidCodec[0],vidCodec[1],vidCodec[2], vidCodec[3]);
int imgInterval = 60; // seconds
int imgNum = 0;
time_t sec;
static long frameNum = 0;
bool isDisplayEnabled = false;
// int delay = 1;
int vidNum = 1;
bool isRecordingEnabled = vidNum > 0;
bool isImageCaptureEnabled = imgNum > 0;
time_t vidTime = 20;
int vidTotal = 0;
time_t imgTime = 0;
time_t imgDelta = 0;
int imgTotal = 0;
int vidInterval = 60; // seconds
double fps = 0.0;
sec = time(NULL);
frameNum++;
if (isDisplayEnabled)
{
if(!frame.empty())
imshow("Current Frame", frame);
}
// Decide whether to create new video file
if ((isRecordingEnabled) && (!isRecording))
{
int width = (int)cap.get(CV_CAP_PROP_FRAME_WIDTH);
int height = (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT);
writer = createVideoFile(vidDir, width, height, vidFps, fourcc, sec);
if(writer.isOpened())
{
vidTime = sec;
isRecording = true;
frameNum = 0;
}
else
{
cout<< "No se pudo abrir el directorio: "<<vidDir<<endl;
isRecordingEnabled=false;
}
}
// Write frame to video, calculate time interval and whether or not to create new video file
if (isRecordingEnabled)
{
writer.write(frame);
vidDelta = sec - vidTime;
// cout << "vidDelta "<<vidDelta<<" >= "<<vidInterval<<endl;
if (vidDelta >= vidInterval) {
// isRecording = false;
vidTotal = vidTotal + 1;
// cout << "Videos recorded =" << vidTotal << "/" << vidNum << endl;
// cout << "vidTotal="<<vidTotal<<" vidNum="<<vidNum<<endl;
if (vidTotal >= vidNum) {
isRecordingEnabled = false;
if (vidDelta > 0) {
fps = frameNum / vidDelta;
frameNum = 0;
}
// cout << "Recording completed fps=" << fps << endl;
if (isDisplayEnabled) {
writer = VideoWriter();
}
}
}
}
if (isImageCaptureEnabled) {
imgDelta = (sec - imgTime);
if (imgDelta >= imgInterval) {
writeImageFile(imgDir, frame, imgFmt, sec);
imgTime = sec;
imgTotal = imgTotal + 1;
//......... part of the code omitted here .........
Example 3: main
int main(int argc, char ** argv)
{
if (!parseArguments(argc, argv))
{
showHelp(argv[0], false);
return -1;
}
VideoCapture cap(GlobalArgs.deviceName);
if (!cap.isOpened())
{
cout << "Cannot find device " << GlobalArgs.deviceName << endl;
showHelp(argv[0], false);
return -1;
}
VideoWriter videoWriter;
Mat frame;
FocusState state = createInitialState();
bool focus = true;
bool lastSucceeded = true;
namedWindow(windowOriginal, 1);
// Get settings:
if (GlobalArgs.verbose)
{
if ((cap.get(CAP_PROP_GPHOTO2_WIDGET_ENUMERATE) == 0)
|| (cap.get(CAP_PROP_GPHOTO2_WIDGET_ENUMERATE) == -1))
{
// Some VideoCapture implementations can return -1, 0.
cout << "This is not GPHOTO2 device." << endl;
return -2;
}
cout << "List of camera settings: " << endl
<< (const char *) (intptr_t) cap.get(CAP_PROP_GPHOTO2_WIDGET_ENUMERATE)
<< endl;
cap.set(CAP_PROP_GPHOTO2_COLLECT_MSGS, true);
}
cap.set(CAP_PROP_GPHOTO2_PREVIEW, true);
cap.set(CAP_PROP_VIEWFINDER, true);
cap >> frame; // To check PREVIEW output Size.
if (GlobalArgs.output != NULL)
{
Size S = Size((int) cap.get(CAP_PROP_FRAME_WIDTH), (int) cap.get(CAP_PROP_FRAME_HEIGHT));
int fourCC = CV_FOURCC('M', 'J', 'P', 'G');
videoWriter.open(GlobalArgs.output, fourCC, GlobalArgs.fps, S, true);
if (!videoWriter.isOpened())
{
cerr << "Cannot open output file " << GlobalArgs.output << endl;
showHelp(argv[0], false);
return -1;
}
}
showHelp(argv[0], true); // welcome msg
if (GlobalArgs.minimumFocusStep == 0)
{
state.minFocusStep = findMinFocusStep(cap, FOCUS_STEP / 16, -FOCUS_DIRECTION_INFTY);
}
else
{
state.minFocusStep = GlobalArgs.minimumFocusStep;
}
focusDriveEnd(cap, -FOCUS_DIRECTION_INFTY); // Start with closest
char key = 0;
while (key != 'q' && key != 27 /*ESC*/)
{
cap >> frame;
if (frame.empty())
{
break;
}
if (GlobalArgs.output != NULL)
{
videoWriter << frame;
}
if (focus && !GlobalArgs.measure)
{
int stepToCorrect = correctFocus(lastSucceeded, state, rateFrame(frame));
lastSucceeded = cap.set(CAP_PROP_ZOOM,
max(stepToCorrect, state.minFocusStep) * state.direction);
if ((!lastSucceeded) || (stepToCorrect < state.minFocusStep))
{
if (--GlobalArgs.breakLimit <= 0)
{
focus = false;
state.step = state.minFocusStep * 4;
cout << "In focus, you can press 'f' to improve with small step, "
"or 'r' to reset." << endl;
}
}
else
{
GlobalArgs.breakLimit = DEFAULT_BREAK_LIMIT;
}
}
else if (GlobalArgs.measure)
{
//......... part of the code omitted here .........
Example 4: constructGraph
//......... part of the code omitted here .........
list<int> traverseList;
int currentVertex = 1, endVertex = 80, nextVertex;
traverseList.push_back(currentVertex);
nextVertex = findNextVertex(currentVertex, adjacencyList, noOfVerticesLeft);
cout << "Debug 44:: Next Vertex" << nextVertex << "adjacencyList size:" << adjacencyList.size() << endl;
int debugWhile = 0;
while((noOfVerticesLeft!=1) && (nextVertex != endVertex)){
cout << "Debug 55:: Inside while. Next Vertex:: " << nextVertex << " VerticesLeft:: " << noOfVerticesLeft << " debugWhile::" << debugWhile << endl;
traverseList.push_back(nextVertex);
currentVertex = nextVertex;
nextVertex = findNextVertex(currentVertex, adjacencyList, noOfVerticesLeft);
debugWhile++;
}
// Print the traverse route
cout << "Final traversal of Vertices and size" << traverseList.size() << endl;
for(list<int>::iterator it=traverseList.begin(); it!=traverseList.end(); it++) {
cout << *it << " - ";
}
cout << endl;
// Display the video
cout << "Expression animation" << endl;
string listOfFacesFileName = outputLocation + "/AllFaces/ListOfFaces.txt";
ifstream listOfFacesFileNameHandle(listOfFacesFileName.c_str());
vector<string> faceMap;
// Collect the mapping
cout << "Collecting the mapping" << endl;
if(listOfFacesFileNameHandle.is_open()) {
while(getline(listOfFacesFileNameHandle, line)) {
split(stringVector, line, boost::is_any_of(" "));
//cout << "DEBUG 66:: stringVector[0]=" << stringVector[0] << endl;
faceMap.push_back(stringVector[0]);
}
}
Mat faceMat, prevMat, midMat;
const char* EXPRESSION_DISPLAY = "Expressions";
namedWindow(EXPRESSION_DISPLAY, CV_WINDOW_AUTOSIZE);
// Display the traversed faces and make a video of the same
Size sizeT(200, 200);
const string NAME = "Animation.avi";
cout << "DEBUG 11: " << NAME << endl;
VideoWriter outputVideo;
//outputVideo.open( , -1, 20, sizeT, true);
outputVideo.open("/home/mallikarjun/Desktop/test1.avi", CV_FOURCC('D','I','V','X'), 5, Size (200, 200), true );
if (!outputVideo.isOpened())
{
perror("Could not open the output video for write");
}
/* Size sizeT(200, 200);
CvVideoWriter *writer = cvCreateVideoWriter(
"data4.avi",
CV_FOURCC('M','J','P','G'),
30,
sizeT);
cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
cvMoveWindow("mainWin", 200, 200);
*/
bool firstTime_bool = true;
cout << "Displaying the traversed faces" << endl;
for(list<int>::iterator it=traverseList.begin(); it!=traverseList.end(); it++) {
int faceNumber = *it;
//cout << "DEBUG 88:: faceMap[i]=" << faceMap[faceNumber] << endl;
string strTemp = outputLocation + "/AllFaces/Sample Set/" + faceMap[faceNumber];
//cout << "DEBUG 77:: strTemp=" << strTemp << endl;
//IplImage* img=cvLoadImage(strTemp.c_str());
faceMat = imread(strTemp.c_str(), CV_LOAD_IMAGE_COLOR);
if(!firstTime_bool){
addWeighted(prevMat, 0.5, faceMat, 0.5, 0, midMat, -1);
//putText(midMat, "Bridge Image", cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
outputVideo << midMat;
putText(faceMat, faceMap[faceNumber].c_str(), cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
outputVideo << faceMat;
}
else{
putText(faceMat, faceMap[faceNumber].c_str(), cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
outputVideo << faceMat;
firstTime_bool = false;
}
prevMat = faceMat.clone();
//cvShowImage("mainWin", img );
//cvWriteFrame(writer,img);
imshow(EXPRESSION_DISPLAY, faceMat);
cvWaitKey(10);
}
//cvReleaseVideoWriter(&writer);
}
Example 5: main
int main(int argc, char** argv ){
//init capture devices
cap0 = ConfigVideoCapture(cap0dev);
cap1 = ConfigVideoCapture(cap1dev);
namedWindow("cap0",WINDOW_NORMAL);
namedWindow("cap1",WINDOW_NORMAL);
outputVideocap0.open("RecoredVideo/Cam0.avi",CV_FOURCC('M', 'J', 'P', 'G'),11,Size(720,960),true);
outputVideocap1.open("RecoredVideo/Cam1.avi",CV_FOURCC('M', 'J', 'P', 'G'),11,Size(720,960),true);
if (!outputVideocap0.isOpened() || !outputVideocap1.isOpened())
{
printf("Output video could not be opened\n");
return 0;
}
if (!cap0.isOpened() || !cap1.isOpened()){
printf("Output video could not be opened\n");
return 0;
}
//record video
printf("Starting to record video... \n(Press 'c'-key to stop)\n");
fflush(stdout);
for(;;){
clock_t begin = clock();
thread Grab0(threadGrab0);
thread Grab1(threadGrab1);
Grab0.join();
Grab1.join();
char c = (char)waitKey(1);
if( c == 'c')
break;
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
double fps = 1.0/elapsed_secs;
printf("FPS: %f (Press 'c'-key to stop)\n",fps);
fflush(stdout);
}
printf("Writeing video to harddrive...");
fflush(stdout);
for(Mat img : leftImgs)
{
outputVideocap0.write(img);
}
for(Mat img : rightImgs)
{
outputVideocap1.write(img);
}
outputVideocap0.release();
outputVideocap1.release();
printf(" done\n");
fflush(stdout);
return 0;
}
Example 6: main
int main(int argc, char** argv)
{
CommandLineParser parser(argc, argv, params);
if (parser.get<bool>("help"))
{
cout << about << endl;
parser.printMessage();
return 0;
}
String modelConfiguration = parser.get<string>("proto");
String modelBinary = parser.get<string>("model");
//! [Initialize network]
dnn::Net net = readNetFromCaffe(modelConfiguration, modelBinary);
//! [Initialize network]
if (parser.get<bool>("opencl"))
{
net.setPreferableTarget(DNN_TARGET_OPENCL);
}
if (net.empty())
{
cerr << "Can't load network by using the following files: " << endl;
cerr << "prototxt: " << modelConfiguration << endl;
cerr << "caffemodel: " << modelBinary << endl;
cerr << "Models can be downloaded here:" << endl;
cerr << "https://github.com/chuanqi305/MobileNet-SSD" << endl;
exit(-1);
}
VideoCapture cap;
if (parser.get<String>("video").empty())
{
int cameraDevice = parser.get<int>("camera_device");
cap = VideoCapture(cameraDevice);
if(!cap.isOpened())
{
cout << "Couldn't find camera: " << cameraDevice << endl;
return -1;
}
}
else
{
cap.open(parser.get<String>("video"));
if(!cap.isOpened())
{
cout << "Couldn't open image or video: " << parser.get<String>("video") << endl;
return -1;
}
}
Size inVideoSize;
inVideoSize = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH), //Acquire input size
(int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));
Size cropSize;
if (inVideoSize.width / (float)inVideoSize.height > WHRatio)
{
cropSize = Size(static_cast<int>(inVideoSize.height * WHRatio),
inVideoSize.height);
}
else
{
cropSize = Size(inVideoSize.width,
static_cast<int>(inVideoSize.width / WHRatio));
}
Rect crop(Point((inVideoSize.width - cropSize.width) / 2,
(inVideoSize.height - cropSize.height) / 2),
cropSize);
VideoWriter outputVideo;
outputVideo.open(parser.get<String>("out") ,
static_cast<int>(cap.get(CV_CAP_PROP_FOURCC)),
cap.get(CV_CAP_PROP_FPS), cropSize, true);
for(;;)
{
Mat frame;
cap >> frame; // get a new frame from camera/video or read image
if (frame.empty())
{
waitKey();
break;
}
if (frame.channels() == 4)
cvtColor(frame, frame, COLOR_BGRA2BGR);
//! [Prepare blob]
Mat inputBlob = blobFromImage(frame, inScaleFactor,
Size(inWidth, inHeight), meanVal, false); //Convert Mat to batch of images
//! [Prepare blob]
//! [Set input blob]
net.setInput(inputBlob, "data"); //set the network input
//......... part of the code omitted here .........
Example 7: main
int main(int argc, char** argv) {
/* the input and output dir */
string input_dir = "/home/user/ccv/data/sunny_day-img-left";
/* initialize the ccv states */
ccv_enable_default_cache();
ccv_dpm_mixture_model_t* model = ccv_dpm_read_mixture_model(argv[1]);
/* set the pedestrian detection parameters */
ccv_dpm_param_t myparameters;
myparameters.threshold = 0.4;
myparameters.interval = 8;
myparameters.min_neighbors = 1;
myparameters.flags = 0;
/* debug */
string source = "/home/user/ccv/demo1.avi";
VideoCapture inputVideo(source); // Open input
if (!inputVideo.isOpened()) {
cout << "Could not open the input video: " << source << endl;
return -1;
}
int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form
cout<<"The coding is "<<ex<<endl;
cout<<"The fps is "<<inputVideo.get(CV_CAP_PROP_FPS)<<endl;
/* initialize the video writer */
Mat getSize = imread(input_dir + "/image_00000100_0.png");
Size videoSize = getSize.size();
getSize.release();
VideoWriter outputVideo;
outputVideo.open("/home/user/ccv/data/output/eth2_reg_overlaps.avi", ex, fps, videoSize, true);
if (!outputVideo.isOpened()) {
cout<<"Could not open the output video"<<endl;
return false;
}
/* process one by one */
for (int iImage = imageStart; iImage <= imageEnd; iImage++) {
/* read the image, ccv_image for detection, and opencv Mat for recording */
string imageTail;
if (iImage < 10) imageTail = "0000000" + patch::to_string(iImage);
else if (iImage < 100) imageTail = "000000" + patch::to_string(iImage);
else imageTail = "00000" + patch::to_string(iImage);
string image_name = input_dir + "/image_" + imageTail + "_0.png";
ccv_dense_matrix_t* image = 0;
ccv_read(image_name.c_str(), &image, CCV_IO_ANY_FILE);
Mat plot_result = imread(image_name);
if (image == 0) cerr<<"The reading of dataset failed!"<<endl;
cout<<"Image succussfully read"<<endl;
/* processing the image one by one */
unsigned int elapsed_time = get_current_time();
ccv_array_t* seq = ccv_dpm_detect_objects(image, &model, 1, myparameters);
elapsed_time = get_current_time() - elapsed_time;
cout<<"Using "<<elapsed_time<<"ms on detecting the "<<iImage<<"th image"<<endl;
if (seq != NULL) {
/* get the overlaps */
bool* flag = new bool[seq->rnum];
for (int i = 0; i < seq->rnum; i++) flag[i] = true;
for (int i = 0; i < seq->rnum; i++) {
for (int j = 0; j < seq->rnum; j++) {
/* a bigger area */
ccv_root_comp_t* comp1 = (ccv_root_comp_t*)ccv_array_get(seq, i); /* get the ith number */
ccv_root_comp_t* comp2 = (ccv_root_comp_t*)ccv_array_get(seq, j); /* get the jth number */
float dx1 = comp1->rect.x - comp2->rect.x;
float dx2 = (comp1->rect.x + comp1->rect.width) - (comp2->rect.x + comp2->rect.width);
if (abs(dx1) / comp1->rect.width < 0.2 && abs(dx2) / comp2->rect.width < 0.2 &&
abs(dx1) / comp2->rect.width < 0.2 && abs(dx2) / comp1->rect.width < 0.2 &&
get_overlaps(comp1, comp2) > 0.5) {
rectangle(plot_result,
cv::Point(int(min(comp1->rect.x, comp2->rect.x)), int(min(comp1->rect.y, comp2->rect.y))),
cv::Point(int(max(comp1->rect.x + comp1->rect.width, comp2->rect.x + comp2->rect.width)),
int(max(comp1->rect.y + comp1->rect.height, comp2->rect.y + comp2->rect.height))),
cvScalar(255, 0, 0), 2, 8, 0);
}
}
}
/* the detection has something to say */
for (int i = 0; i < seq->rnum; i++) {
ccv_root_comp_t* comp = (ccv_root_comp_t*)ccv_array_get(seq, i); /* get the ith number */
/* a simple regression trick */
float predHeight = ((float)videoSize.height / 2 - comp->rect.y) * 2 + 10;
if (predHeight - comp->rect.height > predHeight * 0.5) {
rectangle(plot_result,
cv::Point(int(comp->rect.x), int(comp->rect.y)),
cv::Point(int(comp->rect.x + comp->rect.width), int(comp->rect.y + comp->rect.height)),
cvScalar(0, 0, 255), 2, 8, 0);
} else{
rectangle(plot_result,
cv::Point(int(comp->rect.x), int(comp->rect.y)),
cv::Point(int(comp->rect.x + comp->rect.width), int(comp->rect.y + comp->rect.height)),
cvScalar(0, 255, 0), 2, 8, 0);
}
}
//......... part of the code omitted here .........
Example 8: main
//......... part of the code omitted here .........
{
if (useCuda)
superRes = createSuperResolution_BTVL1_CUDA();
else
superRes = createSuperResolution_BTVL1();
Ptr<DenseOpticalFlowExt> of = createOptFlow(optFlow, useCuda);
if (of.empty())
exit(-1);
superRes->set("opticalFlow", of);
}
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
Ptr<FrameSource> frameSource;
if (useCuda)
{
// Try to use gpu Video Decoding
try
{
frameSource = createFrameSource_Video_CUDA(inputVideoName);
Mat frame;
frameSource->nextFrame(frame);
}
catch (const cv::Exception&)
{
frameSource.release();
}
}
if (!frameSource)
frameSource = createFrameSource_Video(inputVideoName);
// skip first frame, it is usually corrupted
{
Mat frame;
frameSource->nextFrame(frame);
cout << "Input : " << inputVideoName << " " << frame.size() << endl;
cout << "Scale factor : " << scale << endl;
cout << "Iterations : " << iterations << endl;
cout << "Temporal radius : " << temporalAreaRadius << endl;
cout << "Optical Flow : " << optFlow << endl;
#if defined(HAVE_OPENCV_OCL)
cout << "Mode : " << (useCuda ? "CUDA" : useOcl? "OpenCL" : "CPU") << endl;
#else
cout << "Mode : " << (useCuda ? "CUDA" : "CPU") << endl;
#endif
}
superRes->setInput(frameSource);
VideoWriter writer;
for (int i = 0;; ++i)
{
cout << '[' << setw(3) << i << "] : ";
Mat result;
#if defined(HAVE_OPENCV_OCL)
cv::ocl::oclMat result_;
if(useOcl)
{
MEASURE_TIME(superRes->nextFrame(result_));
}
else
#endif
{
MEASURE_TIME(superRes->nextFrame(result));
}
#ifdef HAVE_OPENCV_OCL
if(useOcl)
{
if(!result_.empty())
{
result_.download(result);
}
}
#endif
if (result.empty())
break;
imshow("Super Resolution", result);
if (waitKey(1000) > 0)
break;
if (!outputVideoName.empty())
{
if (!writer.isOpened())
writer.open(outputVideoName, VideoWriter::fourcc('X', 'V', 'I', 'D'), 25.0, result.size());
writer << result;
}
}
return 0;
}
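Note how Example 8 uses isOpened() to open the writer lazily inside the loop: the output frame size is only known once the first super-resolved frame exists. Reduced to its core, the pattern looks like this (a sketch; process() is a hypothetical stand-in for whatever produces the output frames):
VideoWriter writer;
for (;;)
{
    Mat result = process(); // hypothetical source of output frames
    if (result.empty())
        break;
    if (!writer.isOpened()) // first frame: the output size is now known
        writer.open("out.avi", VideoWriter::fourcc('X', 'V', 'I', 'D'), 25.0, result.size());
    writer << result;
}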
Example 9: main
int main(int argc, char* argv[])
{
koordinate.open("F:/TRAKASNIMCI/log.txt",fstream::app);
// pause and resume the code
bool pause = false;
// color calibration
bool calibrationMode = true;
// INSTRUCTIONS
cout<<"CONTROLS\n";
cout<<"************************************\n";
cout<<"Press C to reset UKUPNO and SVI \n";
cout<<"Press P to pause program \n";
cout<<"************************************\n";
cout<<"Press M to enter manual record mode\n";
cout<<"Press A to return to automatic record mode \n";
cout<<"Press N to start new record \n";
cout<<"************************************\n";
cout<<"Current record mode > AUTOMATIC\n";
cout<<"************************************\n";
//Matrix to store each frame of the webcam feed
Mat cameraFeed;
Mat threshold;
Mat HSV;
capture.open(0);
if(calibrationMode){
// create sliders on the threshold window
createTrackbars();
} else {
// create sliders on the main conveyor-belt window
trackbarWaitkey();
}
capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
//Video writer
VideoWriter oVideoWriter;//create videoWriter object, not initialized yet
double dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
double dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
//set framesize for use with videoWriter
Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
if(!capture.isOpened()){
cout<<"GRESKA PRILIKOM PREUZIMANJA VIDEA\n";
getchar();
return -1;
}
Objekat crven("crven"), zelen("zelen"), zut("zut"), plav("plav");
//start an infinite loop where webcam feed is copied to cameraFeed matrix
//all of our operations will be performed within this loop
while(1){
//store image to matrix
capture.read(cameraFeed);
//convert frame from BGR to HSV colorspace
cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
if(calibrationMode==true){
//if in calibration mode, we track objects based on the HSV slider values.
inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
morphOps(threshold);
imshow(thresholdWindow,threshold);
trackFilteredObject(threshold,HSV,cameraFeed);
} else {
// black rectangle
rectangle(cameraFeed,Point(200,380),Point(650,460),crBoja,-1);
// red objects
inRange(HSV,crven.getHSVmin(),crven.getHSVmax(),threshold);
//morphOps(threshold);
trackFilteredObject(crven,threshold,HSV,cameraFeed);
// yellow objects
inRange(HSV,zut.getHSVmin(),zut.getHSVmax(),threshold);
//morphOps(threshold);
trackFilteredObject(zut,threshold,HSV,cameraFeed);
// green objects
inRange(HSV,zelen.getHSVmin(),zelen.getHSVmax(),threshold);
//morphOps(threshold);
trackFilteredObject(zelen,threshold,HSV,cameraFeed);
//......... part of the code omitted here .........
Example 10: main
int main( int argc, const char** argv )
{
VideoCapture cap;
Rect trackWindow;
int hsize = 16;
float hranges[] = {0,180};
const float* phranges = hranges;
CommandLineParser parser(argc, argv, keys);
if (parser.has("help"))
{
help();
return 0;
}
int camNum = parser.get<int>(0);
cap.open(camNum);
if( !cap.isOpened() )
{
help();
cout << "***Could not initialize capturing...***\n";
cout << "Current parameter's value: \n";
parser.printMessage();
return -1;
}
Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH), // Acquire input size
(int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));
VideoWriter videoStream;
videoStream.open("./VirtualPiano.mp4", -1, cap.get(CV_CAP_PROP_FPS), S, true);
if (!videoStream.isOpened())
{
cout << "Could not open the output video." << endl;
return -1;
}
cout << hot_keys;
//namedWindow( "Histogram", 0 );
namedWindow( "VirtualPiano", 0 );
resizeWindow( "VirtualPiano", WINDOW_WIDTH, WINDOW_HEIGHT);
setMouseCallback( "VirtualPiano", onMouse, 0 );
//createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
//createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
//createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );
Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
RotatedRect trackBox;
bool paused = false;
for(;;)
{
if( !paused )
{
cap >> frame;
if( frame.empty() )
break;
}
frame.copyTo(image);
Mat flippedImage;
flip(image, flippedImage, 1);
image = flippedImage;
if( !paused )
{
cvtColor(image, hsv, COLOR_BGR2HSV);
if( trackObject )
{
int _vmin = vmin, _vmax = vmax;
inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
Scalar(180, 256, MAX(_vmin, _vmax)), mask);
int ch[] = {0, 0};
hue.create(hsv.size(), hsv.depth());
mixChannels(&hsv, 1, &hue, 1, ch, 1);
if( trackObject < 0 )
{
Mat roi(hue, selection), maskroi(mask, selection);
calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
normalize(hist, hist, 0, 255, NORM_MINMAX);
trackWindow = selection;
trackObject = 1;
histimg = Scalar::all(0);
int binW = histimg.cols / hsize;
Mat buf(1, hsize, CV_8UC3);
for( int i = 0; i < hsize; i++ )
buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
cvtColor(buf, buf, COLOR_HSV2BGR);
for( int i = 0; i < hsize; i++ )
{
int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
rectangle( histimg, Point(i*binW,histimg.rows),
Point((i+1)*binW,histimg.rows - val),
Scalar(buf.at<Vec3b>(i)), -1, 8 );
}
}
//......... part of the code omitted here .........
Example 11: main
int main(){
//set recording and startNewRecording initially to false.
bool recording = false;
bool startNewRecording = false;
int inc=0;
bool firstRun = true;
//if motion is detected in the video feed, we will know to start recording.
bool motionDetected = false;
//pause and resume code (if needed)
bool pause = false;
//set debug mode and trackingenabled initially to false
//these can be toggled using 'd' and 't'
debugMode = false;
trackingEnabled = true;
//set up the matrices that we will need
//the two frames we will be comparing
Mat frame1,frame2;
//their grayscale images (needed for absdiff() function)
Mat grayImage1,grayImage2;
//resulting difference image
Mat differenceImage;
//thresholded difference image (for use in findContours() function)
Mat thresholdImage;
//video capture object.
VideoCapture capture;
capture.open(0);
VideoWriter oVideoWriter;//create videoWriter object, not initialized yet
double dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
double dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
//set framesize for use with videoWriter
Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
if(!capture.isOpened()){
cout<<"ERROR ACQUIRING VIDEO FEED\n";
getchar();
return -1;
}
while(1){
//read first frame
capture.read(frame1);
//convert frame1 to gray scale for frame differencing
cv::cvtColor(frame1,grayImage1,COLOR_BGR2GRAY);
//copy second frame
capture.read(frame2);
//convert frame2 to gray scale for frame differencing
cv::cvtColor(frame2,grayImage2,COLOR_BGR2GRAY);
//perform frame differencing with the sequential images. This will output an "intensity image"
//do not confuse this with a threshold image, we will need to perform thresholding afterwards.
cv::absdiff(grayImage1,grayImage2,differenceImage);
//threshold intensity image at a given sensitivity value
cv::threshold(differenceImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
if(debugMode==true){
//show the difference image and threshold image
cv::imshow("Difference Image",differenceImage);
cv::imshow("Threshold Image", thresholdImage);
}else{
//if not in debug mode, destroy the windows so we don't see them anymore
cv::destroyWindow("Difference Image");
cv::destroyWindow("Threshold Image");
}
//blur the image to get rid of the noise. This will output an intensity image
cv::blur(thresholdImage,thresholdImage,cv::Size(BLUR_SIZE,BLUR_SIZE));
//threshold again to obtain binary image from blur output
cv::threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
if(debugMode==true){
//show the threshold image after it's been "blurred"
imshow("Final Threshold Image",thresholdImage);
}
else {
//if not in debug mode, destroy the windows so we don't see them anymore
cv::destroyWindow("Final Threshold Image");
}
//if tracking enabled, search for Motion
if(trackingEnabled){
//check for motion in the video feed
//the detectMotion function will return true if motion is detected, else it will return false.
//set motionDetected boolean to the returned value.
motionDetected = detectMotion(thresholdImage,frame1);
}else{
//reset our variables if tracking is disabled
motionDetected = false;
}
////////////**STEP 1**//////////////////////////////////////////////////////////////////////////////////////////////////////////////
//draw time stamp to video in bottom left corner. We draw it before we write so that it is written on the video file.
//if we're in recording mode, write to file
if(recording){
//check if it's our first time running the program so that we don't create a new video file over and over again.
//......... part of the code omitted here .........
Example 12: main
int main(int argc,char** argv){
int houghVote = 100;
Mat src1;
//cout<<"linenumber="<<linenumber;
float rho_values[linenumber];
float theta_values[linenumber];
Mat src,contours,contoursInv,ortho,H;
float theta_max = 1000000;
float theta_min = -1000000;
float rho_min,rho_max;
//VideoCapture capture(1);
VideoCapture capture(1);
namedWindow("ortho", CV_WINDOW_AUTOSIZE);
double dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
double dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
cout << "Frame Size = " << dWidth << "x" << dHeight << endl;
Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
VideoWriter oVideoWriter ("wierd.avi", CV_FOURCC('D', 'I', 'V', '3'), 20, frameSize, true); //initialize the VideoWriter object
if ( !oVideoWriter.isOpened() ) // if the VideoWriter could not be initialized, exit the program
{
cout << "ERROR: Failed to write the video" << endl;
return -1;
}
int count = 0;
while(true){
vector<vector<Point> > cnt;
vector<Vec4i> hierarchy;
capture >> src;
src.copyTo(src1);
// src = imread(argv[1]);
imshow("Input image",src1);
// imwrite("input.jpg",src1);
vector<Point2f> source_points;
vector<Point2f> dest_points;
source_points.push_back(cv::Point2f(169,386));
source_points.push_back(cv::Point2f(449,313));
source_points.push_back(cv::Point2f(212,111));
source_points.push_back(cv::Point2f(429,98));
dest_points.push_back(cv::Point2f(120,347));
dest_points.push_back(cv::Point2f(448,276));
dest_points.push_back(cv::Point2f(217,177));
dest_points.push_back(cv::Point2f(419,154));
H = getPerspectiveTransform( source_points, dest_points);
warpPerspective(src, src, H, src.size(), INTER_CUBIC | WARP_INVERSE_MAP);
imshow("ortho",src);
// imwrite("ortho.jpg",src);
// imwrite("input1.jpg",src);
cvtColor(src,src,CV_RGB2HSV);
inRange(src, Scalar(0,0,200), Scalar(140,255,255), src);
//imshow("image",src);
erode(src,src,cv::Mat());
imshow("erode",src);
Canny(src,contours,50,150,3);
//equalizeHist(contours,contours);
imshow("Canny",contours);
//imwrite("canny.jpg",contours);
//threshold(contours,contoursInv,128,255,THRESH_BINARY_INV);
//imshow("threshold",contoursInv);
std::vector<Vec2f> lines;
if (houghVote < 1 or lines.size() > 2){
houghVote = 100;
}
else{ houghVote += 25;}
while(lines.size() < 5 && houghVote > 0){
HoughLines(contours,lines,1,CV_PI/180, houghVote);
houghVote -= 5;
}
//std::cout << houghVote << "\n";
Mat hough(contours.rows,contours.cols,CV_8U,Scalar(0));
Mat result1(contours.rows,contours.cols,CV_8U,Scalar(0));
//src.copyTo(hough);
std::vector<Vec2f>::const_iterator it= lines.begin();
//Mat hough(src.size(),CV_8U,Scalar(0));
//if(count==0)
// cout<<"no. of lines="<<lines.end()-lines.begin()<<endl;
// int val=0;
while (it!=lines.end()) {
float rho= (*it)[0];
float theta= (*it)[1];
/*if (theta < theta_min)
{
theta_min = theta;
rho_min = rho;
}
else if (theta > theta_max)
{
theta_max = theta;
rho_max = rho;
}*/
rho_values[it-lines.begin()]=rho;
theta_values[it-lines.begin()]=theta;
// cout<<"rho="<<rho_values[it-lines.begin()]<<"theta="<<theta_values[it-lines.begin()]<<endl;
Point pt1(rho/cos(theta),0);
Point pt2((rho-hough.rows*sin(theta))/cos(theta),hough.rows);
// if(count==0)
//......... part of the code omitted here .........
Example 13: main
int main(int argc, char** argv)
{
if(argc >= 3)
{
VideoCapture inputVideo(argv[1]); // open the default camera
if(!inputVideo.isOpened()) // check if we succeeded
return -1;
// Initialize
VideoWriter outputVideo; // Open the output
const string source = argv[2]; // the source file name
const string NAME = source + ".mp4"; // Form the new name with container
int ex = inputVideo.get(CV_CAP_PROP_FOURCC); // Get Codec Type- Int form
std::cout << ex << "\n" << (int)inputVideo.get(CV_CAP_PROP_FOURCC) << "\n";
Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), //Acquire input size
(int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));
outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, false);
char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
cout << "Input codec type: " << EXT << endl;
if (!outputVideo.isOpened())
{
cout << "Could not open the output video for write \n";
return -1;
}
// Basketball Color
int iLowH = 180;
int iHighH = 16;
int iLowS = 95;
int iHighS = 200;
int iLowV = 75;
int iHighV = 140;
// court Color
int courtLowH = 0;
int courtHighH = 20;
int courtLowS = 50;
int courtHighS = 150;
int courtLowV = 160;
int courtHighV = 255;
namedWindow("Result Window", 1);
//namedWindow("Court Window", 1);
// Mat declaration
Mat prev_frame, prev_gray, cur_frame, cur_gray;
Mat frame_blurred, frameHSV, frameGray;
// take the first frame
inputVideo >> prev_frame;
/* manual ball selection */
MouseParams mp;
prev_frame.copyTo( mp.ori );
prev_frame.copyTo( mp.img );
setMouseCallback("Result Window", BallSelectFunc, &mp );
int enterkey = 0;
while(enterkey != 32 && enterkey != 113)
{
enterkey = waitKey(30) & 0xFF;
imshow("Result Window", mp.img);
}
Rect lastBallBox;
Point lastBallCenter;
Point lastMotion;
/* Kalman Filter Initialization */
KalmanFilter KF(4, 2, 0);
float transMatrixData[16] = {1,0,1,0, 0,1,0,1, 0,0,1,0, 0,0,0,1};
KF.transitionMatrix = Mat(4, 4, CV_32F, transMatrixData);
Mat_<float> measurement(2,1);
measurement.setTo(Scalar(0));
KF.statePre.at<float>(0) = mp.pt.x;
KF.statePre.at<float>(1) = mp.pt.y;
KF.statePre.at<float>(2) = 0;
KF.statePre.at<float>(3) = 0;
setIdentity(KF.measurementMatrix);
setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
setIdentity(KF.errorCovPost, Scalar::all(.1));
int pre_status_7=0;
/* start tracking */
setMouseCallback("Result Window", CallBackFunc, &frameHSV);
for(int frame_num=1; frame_num < inputVideo.get(CAP_PROP_FRAME_COUNT); ++frame_num)
{
int cur_status_7=pre_status_7;
inputVideo >> cur_frame; // get a new frame
// Blur & convert frame to HSV color space
cv::GaussianBlur(prev_frame, frame_blurred, cv::Size(5, 5), 3.0, 3.0);
cvtColor(frame_blurred, frameHSV, COLOR_BGR2HSV);
//......... part of the code omitted here .........
Example 14: main
int main(int argc, char** argv)
{
// variable initialization
int keyInput = 0;
int nFrames = 0, nSmoothFrames = 0, nFailedFrames = 0, nBlindFrames = 0;
int lastDx = 0, lastDy = 0;
bool bOverlay = true; // plot overlay?
bool bTrace = true & bOverlay; // plot 'bubble' trace? (only when overlay active)
Ptr<BackgroundSubtractor> pMOG2;
VideoCapture capture; // input video capture
VideoWriter outputVideo; // output video writer
Mat curFrame, // current original frame
fgMaskMOG2, // foreground mask from MOG2 algorithm
bgImg, // container for background image from MOG2
grayFrame, // grayscale conversion of original frame
frameDil, // dilated grayscale frame
canny_out; // output of Canny algorithm for shape outline detection
Mat *pOutMat = &curFrame; // pointer to image that will be rendered once per input video frame
Mat strucElem = getStructuringElement(MORPH_RECT, Size(3, 3)); // dilatation base element
// containers for output of findContours()
vector<Mat> contours;
vector<Vec4i> hierarchy;
// read video input filename from command line and construct output filename
if (argc < 2) {
cerr << "Please provide input video filename." << endl;
return EXIT_FAILURE;
}
string filename(argv[1]);
string outName = filename.substr(0, filename.length() - 4) + "_out.avi";
Rect lastKnownRect, lastRect;
Point lastKnownPos, lastPos, estimatePos, plotPos;
list<Point> lastKnownPositions;
// init 'live' video output window
namedWindow("Motion tracking");
// try to open input file
capture.open(filename);
if (!capture.isOpened()) {
cerr << "Unable to open file '" << filename << "'." << endl;
return EXIT_FAILURE;
} else {
cout << "Successfully opened file '" << filename << "'." << endl;
}
// try to write to output file
Size vidS = Size((int)capture.get(CV_CAP_PROP_FRAME_WIDTH), (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT));
outputVideo.open(outName, CV_FOURCC('P','I','M','1'), capture.get(CV_CAP_PROP_FPS), vidS, true);
if (!outputVideo.isOpened()) {
cerr << "Unable to write to output video." << endl;
return EXIT_FAILURE;
}
// build frame buffer and background subtractor
pMOG2 = createBackgroundSubtractorMOG2(500, 30., true);
// main loop over frames
while (capture.read(curFrame) && (char)keyInput != 'q')
{
++nFrames;
cvtColor(curFrame, grayFrame, CV_BGR2GRAY); // convert to grayscale
threshold(grayFrame, grayFrame, 128., 0., CV_THRESH_TRUNC); // try to mitigate (white) reflections by truncating the current frame
GaussianBlur(grayFrame, grayFrame, Size(7, 7), 0, 0);
pMOG2->apply(grayFrame, fgMaskMOG2);
// erode and dilate to remove some noise
erode(fgMaskMOG2, frameDil, strucElem);
dilate(frameDil, frameDil, strucElem);
// dilate and erode to remove holes from foreground
dilate(frameDil, frameDil, strucElem);
erode(frameDil, frameDil, strucElem);
// canny to find foreground outlines
Canny(frameDil, canny_out, 100, 200, 3);
// find contours, sort by contour size (descending)
findContours(canny_out, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); // find contours
sort(contours.begin(), contours.end(), rvs_cmp_contour_area); // sort by contour area, beginning with the largest
// determine largest "moving" object
int iMaxSize = 0;
bool bFoundCloseContour = false;
for (unsigned int i = 0; i < contours.size(); i++)
{
if (contourArea(contours[i]) < CONTOUR_AREA_THRESH) // ignore contours which are too small (noise)
break;
// ignore contours which are too far away from the last frame
Rect boun = boundingRect(contours[i]); // bounding rect
//......... part of the code omitted here .........
Example 15: main
int main(int argc, char *argv[])
{
help();
if (argc != 4)
{
cout << "Not enough parameters" << endl;
return -1;
}
const string source = argv[1]; // the source file name
const bool askOutputType = argv[3][0] == 'Y'; // if false, it will use the input's codec type
VideoCapture inputVideo(source); // Open input
if (!inputVideo.isOpened())
{
cout << "Could not open the input video: " << source << endl;
return -1;
}
string::size_type pAt = source.find_last_of('.'); // Find extension point
const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi"; // Form the new name with container
int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form
// Transform from int to char via Bitwise operators
char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), // Acquire input size
(int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));
VideoWriter outputVideo; // Open the output
if (askOutputType)
outputVideo.open(NAME, ex=-1, inputVideo.get(CV_CAP_PROP_FPS), S, true);
else
outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, true);
if (!outputVideo.isOpened())
{
cout << "Could not open the output video for write: " << source << endl;
return -1;
}
cout << "Input frame resolution: Width=" << S.width << " Height=" << S.height
<< " of nr#: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl;
cout << "Input codec type: " << EXT << endl;
int channel = 2; // Select the channel to save
switch(argv[2][0])
{
case 'R' : channel = 2; break;
case 'G' : channel = 1; break;
case 'B' : channel = 0; break;
}
Mat src, res;
vector<Mat> spl;
for(;;) //Show the image captured in the window and repeat
{
inputVideo >> src; // read
if (src.empty()) break; // check if at end
split(src, spl); // process - extract only the correct channel
for (int i =0; i < 3; ++i)
if (i != channel)
spl[i] = Mat::zeros(S, spl[0].type());
merge(spl, res);
//outputVideo.write(res); //save or
outputVideo << res;
}
cout << "Finished writing" << endl;
return 0;
}
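Examples 13 and 15 both unpack the FOURCC integer returned by get(CV_CAP_PROP_FOURCC) into its four ASCII characters using bitwise masks and shifts. Factored into a small helper, the same idea reads as follows (a sketch; the least-significant-byte-first order matches the masks used in those examples):
#include <string>
// Decode a packed FOURCC integer (e.g. from CV_CAP_PROP_FOURCC) into text such as "MJPG".
static std::string fourccToString(int fourcc)
{
    std::string s(4, ' ');
    for (int i = 0; i < 4; ++i)
        s[i] = (char)((fourcc >> (8 * i)) & 0xFF); // least significant byte first
    return s;
}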