This page collects typical usage examples of the C++ method cv::VideoCapture::open. If you have been wondering what VideoCapture::open does in C++, how to use it, or what working calls look like, the curated code examples here may help. You can also explore further usage examples of the containing class, cv::VideoCapture.
Below are 15 code examples of the VideoCapture::open method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
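Before the examples, here is a minimal, self-contained sketch of the two common open() overloads: opening by device index and opening by file path. Both overloads return a bool indicating success. The file name "sample.avi" is a hypothetical placeholder, not taken from any example below.

#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    cv::VideoCapture cap;
    // open(int) selects a camera by device index; 0 is usually the default camera.
    if (!cap.open(0)) {
        // open(const std::string&) opens a video file or stream URL instead.
        if (!cap.open("sample.avi")) { // hypothetical file name
            std::cerr << "could not open camera 0 or sample.avi" << std::endl;
            return 1;
        }
    }
    cv::Mat frame;
    cap >> frame; // grab one frame to confirm the source is actually readable
    std::cout << "frame size: " << frame.cols << "x" << frame.rows << std::endl;
    return 0;
}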
Example 1: openVideo
static int openVideo(cv::VideoCapture &capture, std::string filename) {
    std::cout << "opening video " << filename << std::endl;
    capture.open(filename);
    if (!capture.isOpened()) {
        // The name is not a readable file; try it as a webcam index instead.
        std::cout << "opening camera " << filename << std::endl;
        int id_webcam = std::atoi(filename.c_str());
        capture.open(id_webcam);
    }
    if (!capture.isOpened()) {
        // std::cout << "cannot open " << filename << std::endl;
        return 0;
    }
    return 1;
}
Example 2: readParameters
bool readParameters(int argc, char** argv)
{
    if (argc < 4) {
        usage();
        return false;
    }
    TheInputVideo = argv[1];
    // read from camera or from the input video file
    if (TheInputVideo == "live") TheVideoCapturer.open(0);
    else TheVideoCapturer.open(TheInputVideo);
    if (!TheVideoCapturer.isOpened())
    {
        cerr << "Could not open video" << endl;
        return false;
    }
    // read intrinsic file
    try {
        CameraParams.readFromXMLFile(argv[2]);
    } catch (std::exception &ex) {
        cout << ex.what() << endl;
        return false;
    }
    // read board file
    try {
        TheBoardConfig.readFromFile(argv[3]);
    } catch (std::exception &ex) {
        cout << ex.what() << endl;
        return false;
    }
    if (argc > 4) TheMarkerSize = atof(argv[4]);
    else TheMarkerSize = 1.;
    return true;
}
Example 3: mono_handler
void mono_handler(const lcm_recv_buf_t *rbuf, const char* channel, const lcmt_stereo *msg, void *user) {
    // open the frame of the video specified by the message
    // check to see if the current video is the correct video file
    if (current_video_number != msg->video_number) {
        video_capture.release();
        std::string newfile = GetMonoFilename(msg->timestamp, msg->video_number);
        if (newfile.empty()) {
            return;
        }
        std::cout << "Opening file: " << newfile << std::endl;
        if (!video_capture.open(newfile)) {
            std::cerr << "Failed to open file: " << newfile << std::endl;
            return;
        }
        current_video_number = msg->video_number;
    }
    video_capture.set(CV_CAP_PROP_POS_FRAMES, msg->frame_number + frame_offset);
    cv::Mat frame;
    video_capture >> frame;
    SendImageOverLcm(lcm_, image_channel, frame);
}
Example 4: initVideoStream
void initVideoStream(cv::VideoCapture &cap)
{
    if (cap.isOpened())
        cap.release();
    cap.open(0); // open the default camera
}
Example 5: InitOpenCVModules
void InitOpenCVModules() // initialize the OpenCV data
{
    if (Webcam_mode)
    {
        camera.open(Webcam_number);
        if (!camera.isOpened()) // error-check the source stream
        {
            // error in opening the video input
            cerr << "Unable to open Camera Stream" << endl;
            exit(EXIT_FAILURE);
        }
        camera >> Current_Frame; // read a frame from the camera source into Current_Frame
    }
    else
    {
Example 6: open
void open(string name, int type) {
    capture_type = type;
    if (capture_type == 0) {
        videoCapture.open(name);
    } else {
        imageCapture.open(name);
    }
};
Example 7: open
bool SubsExtractor::open(string file)
{
    videofile = file;
    bool o = cap->open(videofile);
    StartFrame = 0;
    EndFrame = cap->get(CV_CAP_PROP_FRAME_COUNT);
    return o;
}
Example 8: init_opencv
bool init_opencv()
{
    if (!capture.open(0)) {
        std::cerr << "error: capture.open() failed..." << std::endl;
        exit(-1);
    }
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    result_img.create(cv::Size(640, 480), CV_8UC3);
    return true;
}
Example 9: openCamera
void openCamera(cv::VideoCapture& cap, int argc, const char** argv) {
    cv::CommandLineParser parser(argc, argv, keys);
    int camNum = parser.get<int>("1");
    cap.open(camNum);
    if (!cap.isOpened()) {
        //help();
        std::cout << "***Could not initialize capturing...***\n";
        std::cout << "Current parameter's value: \n";
        exit(-1);
    }
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 160);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 120);
}
Example 10: readAndSet
void initParams::readAndSet(cv::VideoCapture& cap, char* input) {
    // The constructor already opens the file for reading.
    cv::FileStorage fs(input, cv::FileStorage::READ);
    if (fs.isOpened()) {
        fs["frameWidth"] >> frameWidth;
        fs["frameHeight"] >> frameHeight;
        fs["fps"] >> fps;
        fs["camId"] >> camId;
        fs["generateData"] >> genData;
        fs["visualizeData"] >> visualize;
        fs["writeVideo"] >> writeVideo;
        fs["generatePerfData"] >> genPerfData;
        fs["videoFile"] >> videoFile;
        fs.release();
        cap.open(videoFile);
        if (!cap.isOpened()) {
            std::cerr << "Could not access video file: " << videoFile << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}
Example 11: init
void init()
{
    // camera
    camera.open(0);
    if (!camera.isOpened())
    {
        std::cout << "Can not find camera!" << std::endl;
        exit(-1);
    }
    camera.set(cv::CAP_PROP_FRAME_WIDTH, 160);
    camera.set(cv::CAP_PROP_FRAME_HEIGHT, 120);
    /* double width = camera.get(cv::CAP_PROP_FRAME_WIDTH);
    double height = camera.get(cv::CAP_PROP_FRAME_HEIGHT);
    std::cout << "width:" << width << "\t";
    std::cout << "height:" << height << "\t" << std::endl; */
    face_cascade_name = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
    if (!face_cascade.load(face_cascade_name))
    {
        std::cout << "can not find face_cascade_file!" << std::endl;
        exit(-1);
    }
    running = true;
    pthread_mutex_init(&mutexLock, NULL);
    pthread_create(&grabThread, NULL, grabFunc, NULL);
    // Setup edi robot mraa control lib
    spider_init();
    signal(SIGINT, clear);
    signal(SIGTERM, clear);
}
Example 12: video_test
DWORD WINAPI video_test(void* data) {
    MyFrame* f = (MyFrame*)data;
    wxClientDC dc(f->left_bottom);
    cap.open(0);
    cv::Mat img;
    cap >> img;
    Sleep(2000);
    initFrame->Hide();
    // start displaying: fade the window in
    for (int i = 0; i < 50; i++) {
        f->SetTransparent(i + 206);
        Sleep(i / 10);
    }
    while (cap >> img, !img.empty()) {
        IplImage image = img.operator IplImage();
        wxImage wximg = wx_from_cv(&image);
        wximg.Rescale(320, 240);
        dc.DrawBitmap(wxBitmap(wximg), wxDefaultPosition);
    }
    return 0;
}
Example 13: main
int main(int argc, char**argv)
{
    capture.open(0);
    if (capture.isOpened() == false)
    {
        std::cerr << "no capture device found" << std::endl;
        return 1;
    }
    capture.set(CV_CAP_PROP_FRAME_WIDTH, vgaSize.width);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, vgaSize.height);
    if (capture.get(cv::CAP_PROP_FRAME_WIDTH) != (double)vgaSize.width || capture.get(cv::CAP_PROP_FRAME_HEIGHT) != (double)vgaSize.height)
    {
        std::cerr << "current device doesn't support " << vgaSize.width << "x" << vgaSize.height << " size" << std::endl;
        return 2;
    }
    cv::Mat image;
    capture >> image;
    cv::namedWindow(windowName);
    cv::imshow(windowName, image);
    initCuda();
    initArray(image);
    char key = -1;
    enum device statusDevice = useCpuSimd;
    enum precision statusPrecision = precisionFloat;
    int index = 1;
    cv::Mat stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
    cv::Mat gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
    double elapsedTime;
    while (isFinish(key) == false)
    {
        capture >> image;
        switch (key)
        {
        case 'h':
        case 'H':
            // switch to half precision
            statusPrecision = precisionHalf;
            std::cout << std::endl << header << "half " << std::endl;
            stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
            gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
            break;
        case 'f':
        case 'F':
            // switch to single precision
            statusPrecision = precisionFloat;
            std::cout << std::endl << header << "single" << std::endl;
            stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
            gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
            break;
        case 'b':
        case 'B':
            // switch to gray gain
            statusPrecision = precisionByte;
            std::cout << std::endl << header << "char" << std::endl;
            gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
            break;
        case '0':
        case '1':
            index = key - '0';
            switch (statusPrecision)
            {
            case precisionHalf:
                // precision half
                stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
                gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
                break;
            case precisionFloat:
                // precision single
                stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
                gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
                break;
            case precisionByte:
                // precision byte
                gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
                break;
            default:
                break;
            }
            break;
        case 'c':
        case 'C':
            std::cout << std::endl << "Using CPU SIMD " << std::endl;
            statusDevice = useCpuSimd;
            break;
        case 'g':
        case 'G':
            std::cout << std::endl << "Using GPU " << std::endl;
            statusDevice = useGpu;
            break;
        default:
            break;
        }
        if (statusDevice == useCpuSimd)
        {
            elapsedTime = multiplyImage(image, gain);
            //......... some code omitted here .........
Example 14: makeGUI
void OpenCVTemplateApp::makeGUI() {
    interface->clear();
    interface->addButton("load image", [this] {
        auto path = ci::app::getOpenFilePath();
        image = cv::imread(path.string());
        std::cout << "cols " << image.cols << std::endl;
        std::cout << "rows " << image.rows << std::endl;
        std::cout << "channels " << image.channels() << std::endl;
        imageTexture = gl::Texture::create(fromOcv(image));
    });
    interface->addButton("load video", [this] {
        auto path = ci::app::getOpenFilePath();
        video.open(path.string());
        frameWidth = video.get(cv::CAP_PROP_FRAME_WIDTH);
        frameHeight = video.get(cv::CAP_PROP_FRAME_HEIGHT);
        totalFrames = video.get(cv::CAP_PROP_FRAME_COUNT);
        video.read(frame);
        if (isGrayScale) {
            cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
        }
        frameTexture = gl::Texture::create(fromOcv(frame));
        makeGUI();
    });
    interface->addSeparator();
    if (frameTexture) {
        interface->addParam("gray scale", &isGrayScale).updateFn([this] {
            video.retrieve(frame);
            if (isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
            makeGUI();
        });
        interface->addParam("nb of feature", &nbOfFeaturePoints).min(1).max(1000);
        if (isGrayScale) {
            interface->addButton("get feature points", [this] {
                cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
            });
        }
        interface->addSeparator();
        interface->addParam("frame", &frameIndex).min(0).max(totalFrames - 1).step(1).updateFn([this] {
            video.set(cv::CAP_PROP_POS_FRAMES, frameIndex);
            video.read(frame);
            if (isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
        });
        interface->addSeparator();
        interface->addParam("speed", &frameSpeed).min(1).max(1000).step(1);
        interface->addButton("play", [this] {
            currentState = PLAY;
            makeGUI();
        });
        if (currentState == PLAY) {
            interface->addButton("pause", [this] {
                currentState = PAUSE;
                makeGUI();
            });
        }
    }
}
Example 15:
void *cameraThread(void *arg) {
    std::vector<cmdData> cmd;
    cv::Mat frame;
    cv::namedWindow("Color", CV_WINDOW_AUTOSIZE); // create a window with the name "MyWindow"
    cv::namedWindow("Thresholded", CV_WINDOW_AUTOSIZE); // create a window with the name "HSV"
    //cv::namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
    int initShutter = 242;
    //int initShutter = 0;
    int shutterVal = initShutter;
    int cannyMin = 30;
    int blockSize = 89;
    int fps = 60;
    // Shutter slider
    cv::createTrackbar("Shutter", "Color", &shutterVal, 4095, shutterCB, NULL);
    // Canny threshold
    cv::createTrackbar("Threshold", "Color", &cannyMin, 255, NULL, NULL);
    cv::createTrackbar("BlockSize", "Color", &blockSize, 255, NULL, NULL);
    cv::Mat colorFrame;
    cv::Mat tresholdedFrame;
    cv::Mat hsvFrame;
    cv::Mat grey, tmp;
    cv::Mat contourOutput;
    cap.open(CV_CAP_DC1394); // Open first firewire camera. in 2.3 use CV_CAP, in 2.5 use CV::CAP
    cap.set(CV_CAP_PROP_WHITE_BALANCE_BLUE_U, 794); // 736
    cap.set(CV_CAP_PROP_WHITE_BALANCE_RED_V, 437);
    cap.set(CV_CAP_PROP_EXPOSURE, initShutter); // "Shutter" in coriander
    cap.set(CV_CAP_PROP_FPS, fps);
    cap.set(CV_CAP_PROP_GAMMA, 0);
    cap.set(CV_CAP_PROP_GAIN, 30);
    while (runState) {
        cap >> frame;
        std::vector<std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;
        // Get color image, decode bayer BGGR.
        cv::cvtColor(frame, colorFrame, CV_BayerBG2RGB, 0);
        cv::cvtColor(colorFrame, grey, CV_RGB2GRAY);
        // Remove gripper from img
        cv::Rect roi = cv::Rect(0, 0, 640, 360);
        cv::Mat submatrix = cv::Mat(grey, roi);
        //submatrix.setTo(cv::Scalar(255));
        cv::threshold(submatrix, tresholdedFrame, cannyMin, 255, cv::THRESH_BINARY_INV);
        if (blockSize % 2 == 0) {
            // The adaptive-threshold block size MUST be odd.
            blockSize = blockSize - 1;
        }
        // cv::adaptiveThreshold(submatrix, tresholdedFrame, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, blockSize, 0);
        cv::Moments mu;
        mu = cv::moments(tresholdedFrame, true);
        // Find center
        cv::Point2f mc = cv::Point2f(mu.m10 / mu.m00, mu.m01 / mu.m00);
        // Count non-zero pixels. Used for determining if we are screwed (getting large "white" areas.)
        cameraError.areaOfObject = cv::countNonZero(tresholdedFrame);
        // Draw it - convert to RGB so we can draw on it with colors
        cv::cvtColor(tresholdedFrame, tresholdedFrame, CV_GRAY2RGB);
        //cv::Mat drawing = cv::Mat::zeros( tresholdedFrame.size(), CV_8UC3 );
        cv::Scalar color = cv::Scalar(0, 255, 0);
        cv::circle(tresholdedFrame, mc, 5, color, -1, 8, 0);
        //......... some code omitted here .........