This page collects typical usage examples of the C++ method cv::VideoCapture::set. If you have been wondering what VideoCapture::set does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of its containing class, cv::VideoCapture.
The following shows 15 code examples of the VideoCapture::set method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
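A note that applies across these examples: the CV_CAP_PROP_* constants used in most of them come from the legacy C API and were removed in OpenCV 4; modern code uses the cv::CAP_PROP_* equivalents (as Examples 8-10, 13, and 15 do). Also, set() returns a bool, and many capture backends silently ignore unsupported values, so it is good practice to read the property back with get(), as Example 6 does. A minimal sketch (the 640x480 request is just an illustration):

#include <iostream>
#include <opencv2/videoio.hpp>

int main() {
    cv::VideoCapture cap(0);
    if (!cap.isOpened()) return 1;
    // Request a resolution; set() may return false, or succeed while the
    // driver substitutes the nearest supported value.
    bool okW = cap.set(cv::CAP_PROP_FRAME_WIDTH, 640);
    bool okH = cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);
    // Read back what was actually applied.
    std::cout << "set ok: " << okW << "," << okH
              << " actual: " << cap.get(cv::CAP_PROP_FRAME_WIDTH)
              << "x" << cap.get(cv::CAP_PROP_FRAME_HEIGHT) << std::endl;
    return 0;
}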
Example 1: init_opencv
bool init_opencv()
{
if (!capture.open(0)) {
std::cerr << "error: capture.open() failed..." << std::endl;
exit(-1);
}
capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
result_img.create(cv::Size(640, 480), CV_8UC3);
return true;
}
Example 2: cameraInit
void cameraInit(){
/*
cv::Mat frame;
cap >> frame; // get a new frame from camera
//cvtColor(frame, edges, CV_BGR2GRAY);
//GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
//Canny(edges, edges, 0, 30, 3);
imshow("edges", frame);
cv::waitKey(30);
}
*/
cap.set(CV_CAP_PROP_FRAME_WIDTH,1280);
cap.set(CV_CAP_PROP_FRAME_HEIGHT,720);
cap.set(CV_CAP_PROP_FPS, 30);
}
Example 3: openCamera
void openCamera(cv::VideoCapture& cap, int argc, const char** argv) {
cv::CommandLineParser parser(argc, argv, keys);
int camNum = parser.get<int>("1");
cap.open(camNum);
if (!cap.isOpened()) {
//help();
std::cout << "***Could not initialize capturing...***\n";
std::cout << "Current parameter's value: \n";
exit (-1);
}
cap.set(CV_CAP_PROP_FRAME_WIDTH, 160);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 120);
}
Example 4: mono_handler
void mono_handler(const lcm_recv_buf_t *rbuf, const char* channel, const lcmt_stereo *msg, void *user) {
// open the frame of the video specified by the message
// check to see if the current video is the correct video file
if (current_video_number != msg->video_number) {
video_capture.release();
std::string newfile = GetMonoFilename(msg->timestamp, msg->video_number);
if (newfile.empty()) {
return;
}
std::cout << "Opening file: " << newfile << std::endl;
if (!video_capture.open(newfile)) {
std::cerr << "Failed to open file: " << newfile << std::endl;
return;
}
current_video_number = msg->video_number;
}
video_capture.set(CV_CAP_PROP_POS_FRAMES, msg->frame_number + frame_offset);
cv::Mat frame;
video_capture >> frame;
SendImageOverLcm(lcm_, image_channel, frame);
}
Example 5: readFrameIndex
cv::Mat readFrameIndex(cv::VideoCapture& cap, int frameNum){
int idx = frameNum-1;
cv::Mat frame;
cap.set( CV_CAP_PROP_POS_FRAMES, idx );
// retrieve() alone returns nothing without a prior grab(); read() does both.
bool success = cap.read(frame);
if(!success){
std::cerr << "failed to read frame " << frameNum << std::endl;
}
return frame;
}
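A caveat for the frame-seeking examples (4, 5, 9, 11, and 13): seeking with CAP_PROP_POS_FRAMES is only frame-accurate for some backend/codec combinations; with many compressed formats the backend snaps to the nearest keyframe. A hedged sketch of a seek-and-verify helper (the function name is illustrative, not taken from the examples above):

#include <opencv2/videoio.hpp>

// Seek, then confirm the position the backend actually reports.
bool seekToFrame(cv::VideoCapture& cap, int idx) {
    if (!cap.set(cv::CAP_PROP_POS_FRAMES, idx)) return false;
    // Some codecs land on the nearest keyframe instead of the exact index.
    return static_cast<int>(cap.get(cv::CAP_PROP_POS_FRAMES)) == idx;
}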
Example 6: Init
void Init(cv::VideoCapture& capture)
{
if(!capture.isOpened()){
std::cout << "error starting video capture" << std::endl;
exit(0);
}
//propose a resolution
capture.set(CV_CAP_PROP_FRAME_WIDTH, RESOLUTION_X);
capture.set(CV_CAP_PROP_FRAME_HEIGHT, RESOLUTION_Y);
//get the actual (supported) resolution
ivWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH);
ivHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
std::cout << "camera/video resolution: " << ivWidth << "x" << ivHeight << std::endl;
cv::namedWindow("MOCTLD", 0); //CV_WINDOW_AUTOSIZE );
// cv::resizeWindow("MOCTLD", ivWidth, ivHeight);
cv::setMouseCallback("MOCTLD", MouseHandler);
}
Example 7: shutterCB
void shutterCB(int pos, void* param){
struct timeval t;
cap.set(CV_CAP_PROP_EXPOSURE,pos);
//fcount=0; // Reset frame counter, so we don't have to wait for the avg to "catch up"
std::cout << "CALLBACK !!!: pos: " << pos << " Shutter read: " << cap.get(CV_CAP_PROP_EXPOSURE) << std::endl;
}
Example 8: init
void init()
{
// Camera
camera.open(0);
if(!camera.isOpened())
{
std::cout << "Can not find camera!" << std::endl;
exit(-1);
}
camera.set(cv::CAP_PROP_FRAME_WIDTH,160);
camera.set(cv::CAP_PROP_FRAME_HEIGHT,120);
/* double width = camera.get(cv::CAP_PROP_FRAME_WIDTH);
double height = camera.get(cv::CAP_PROP_FRAME_HEIGHT);
std::cout << "width:" << width << "\t";
std::cout << "height:" << height << "\t" << std::endl;*/
face_cascade_name = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
if(!face_cascade.load(face_cascade_name))
{
std::cout << "can not find face_cascade_file!" << std::endl;
exit(-1);
}
running = true;
pthread_mutex_init(&mutexLock, NULL);
pthread_create(&grabThread, NULL, grabFunc, NULL);
//Setup edi robot mraa control lib
spider_init();
signal(SIGINT, clear);
signal(SIGTERM, clear);
}
Example 9: update
void OpenCVTemplateApp::update()
{
if(currentState == PLAY) {
frameIndex = video.get(cv::CAP_PROP_POS_FRAMES);
frameIndex += frameSpeed;
if(frameIndex >= video.get(cv::CAP_PROP_FRAME_COUNT)-1) {
frameIndex = 0;
}
video.set(cv::CAP_PROP_POS_FRAMES,frameIndex);
video.read(frame);
if(isGrayScale) {
cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
}
frameTexture = gl::Texture::create(fromOcv(frame));
}
}
Example 10: main
/**
* @function main
*/
int main( int argc, char* argv[] ) {
// Filter
gFilterLimits.resize(6);
//gFilterLimits << -0.35, 0.35, -0.70, 0.70, 1.5, 2.4; // Kinect
gFilterLimits << -1.0, 1.0, -1.5, 1.5, 0.35, 2.0; // Asus on top of Crichton
ObjectsDatabase mOd;
mOd.init_classifier();
mOd.load_dataset();
gCapture.open( cv::CAP_OPENNI2 );
if( !gCapture.isOpened() ) {
printf("\t [ERROR] Could not open the capture object \n");
return -1;
}
gCapture.set( cv::CAP_PROP_OPENNI2_MIRROR, 0.0 );
gCapture.set( cv::CAP_PROP_OPENNI_REGISTRATION, -1.0 );
gF = (float)gCapture.get( cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH );
cv::namedWindow( gWindowName, cv::WINDOW_AUTOSIZE );
::google::InitGoogleLogging( argv[0] );
for(;;) {
if( !gCapture.grab() ) {
printf("\t * ERROR Could not grab a frame \n");
return -1;
}
gCapture.retrieve( gRgbImg, cv::CAP_OPENNI_BGR_IMAGE );
if( gIsSegmentedFlag ) { drawSegmented(); }
cv::imshow( gWindowName, gRgbImg );
gCapture.retrieve( gPclMap, cv::CAP_OPENNI_POINT_CLOUD_MAP );
char k = cv::waitKey(30);
if( k == 'q' ) {
printf("\t Finishing program \n");
break;
}
/** Recognize */
else if( k == 'i' ) {
// Process image
process();
gLabels.resize(gClusters.size() );
gIndex.resize(gClusters.size() );
// Store images
for( int i = 0; i < gClusters.size(); ++i ) {
int xl = gBoundingBoxes[i](0);
int yl = gBoundingBoxes[i](1);
int xw = gBoundingBoxes[i](2)-gBoundingBoxes[i](0);
int yw = gBoundingBoxes[i](3)-gBoundingBoxes[i](1);
cv::Mat img( gRgbImg, cv::Rect( xl, yl,
xw, yw ) );
// Predict
mOd.classify( img, gIndex[i], gLabels[i] );
cv::putText( gRgbImg,
gLabels[i], cv::Point(gBoundingBoxes[i](0), gBoundingBoxes[i](1) ),
cv::FONT_HERSHEY_SIMPLEX, 1,
gColors[i],
2 );
mOd.sayIt(gIndex[i]);
}
} // else
} // for
} // main
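Example 10 relies on the grab()/retrieve() split: one grab() latches a frame, after which several retrieve() calls pull different channels (here the BGR image and the point-cloud map) from that same frame. A minimal sketch of the pattern, assuming an OpenNI2-enabled OpenCV build and an attached depth camera:

#include <opencv2/videoio.hpp>

int main() {
    cv::VideoCapture cap(cv::CAP_OPENNI2);
    if (!cap.isOpened()) return 1;
    cv::Mat bgr, depth;
    if (cap.grab()) {                                    // latch one synchronized frame
        cap.retrieve(bgr,   cv::CAP_OPENNI_BGR_IMAGE);   // color channel
        cap.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);   // depth in mm (CV_16UC1)
    }
    return 0;
}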
Example 11: output
void OutputDeviceWindow::output(cv::VideoCapture& in_video) {
const std::string WIN_NAME = "mudou_win";
const int KEY_CODE_ESC = 27;
const int KEY_CODE_J = 106;
const int KEY_CODE_K = 107;
TiledImageCreator tic;
cv::Mat image;
cv::namedWindow(WIN_NAME);
in_video >> image;
if(!image.empty()) {
if(image.rows < image.cols){
image_size = image.cols;
}else{
image_size = image.rows;
}
small_image_size = image_size / (1 << division);
if(small_image_size < 1){
small_image_size = 1;
}
tic.set_cell_size(small_image_size, small_image_size);
}
while(!image.empty()) {
cv::imshow(WIN_NAME, tic.create_tiled_img(image));
int key = cv::waitKey(1);
// std::cout << "key = " << key << std::endl;
if( key == KEY_CODE_ESC ) {
//break;
return;
}
if( key == KEY_CODE_J ) {
// Make the tile images smaller
division++;
small_image_size = image_size / (1 << division);
if(small_image_size < min_small_image_size){
small_image_size = min_small_image_size;
division--;
}
tic.set_cell_size(small_image_size, small_image_size);
}
if( key == KEY_CODE_K ) {
// Make the tile images larger
division--;
if(division < 0){
division = 0;
}
small_image_size = image_size / (1 << division);
tic.set_cell_size(small_image_size, small_image_size);
}
in_video >> image;
if(image.empty()){
in_video.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
in_video >> image;
}
}
}
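The rewind at the end of Example 11 (resetting CV_CAP_PROP_POS_AVI_RATIO to 0 when a read fails) is a common way to loop a video file; seeking CAP_PROP_POS_FRAMES back to 0 achieves the same effect. A minimal sketch, assuming a cv::VideoCapture named video already opened on a seekable file:

cv::Mat img;
while (video.isOpened()) {
    if (!video.read(img)) {                     // end of file reached
        video.set(cv::CAP_PROP_POS_FRAMES, 0);  // rewind to the first frame
        if (!video.read(img)) break;            // give up if the rewind failed
    }
    // ... display or process img ...
}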
Example 12: main
int main(int argc, char**argv)
{
capture.open(0);
if (capture.isOpened() == false)
{
std::cerr << "no capture device found" << std::endl;
return 1;
}
capture.set(CV_CAP_PROP_FRAME_WIDTH, vgaSize.width);
capture.set(CV_CAP_PROP_FRAME_HEIGHT, vgaSize.height);
if (capture.get(cv::CAP_PROP_FRAME_WIDTH) != (double)vgaSize.width || capture.get(cv::CAP_PROP_FRAME_HEIGHT) != (double)vgaSize.height)
{
std::cerr << "current device doesn't support " << vgaSize.width << "x" << vgaSize.height << " size" << std::endl;
return 2;
}
cv::Mat image;
capture >> image;
cv::namedWindow(windowName);
cv::imshow(windowName, image);
initCuda();
initArray(image);
char key = -1;
enum device statusDevice = useCpuSimd;
enum precision statusPrecision = precisionFloat;
int index = 1;
cv::Mat stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
cv::Mat gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
double elapsedTime;
while (isFinish(key) == false)
{
capture >> image;
switch (key)
{
case 'h':
case 'H':
// switch to half precision
statusPrecision = precisionHalf;
std::cout << std::endl << header << "half " << std::endl;
stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
break;
case 'f':
case 'F':
// switch to single precision
statusPrecision = precisionFloat;
std::cout << std::endl << header << "single" << std::endl;
stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
break;
case 'b':
case 'B':
// switch to gray gain
statusPrecision = precisionByte;
std::cout << std::endl << header << "char" << std::endl;
gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
break;
case '0':
case '1':
index = key - '0';
switch (statusPrecision)
{
case precisionHalf:
// precision half
stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
break;
case precisionFloat:
// precision single
stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
break;
case precisionByte:
// precision byte
gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
break;
default:
break;
}
break;
case 'c':
case 'C':
std::cout << std::endl << "Using CPU SIMD " << std::endl;
statusDevice = useCpuSimd;
break;
case 'g':
case 'G':
std::cout << std::endl << "Using GPU " << std::endl;
statusDevice = useGpu;
break;
default:
break;
}
if (statusDevice == useCpuSimd)
{
elapsedTime = multiplyImage(image, gain);
//......... (rest of the code omitted) .........
Example 13: makeGUI
void OpenCVTemplateApp::makeGUI() {
interface->clear();
interface->addButton("load image", [this] {
auto path = ci::app::getOpenFilePath();
image = cv::imread(path.string());
std::cout <<"cols "<<image.cols << std::endl;
std::cout <<"rows "<<image.rows << std::endl;
std::cout <<"channels "<<image.channels() << std::endl;
imageTexture = gl::Texture::create(fromOcv(image));
});
interface->addButton("load video", [this] {
auto path = ci::app::getOpenFilePath();
video.open(path.string());
frameWidth = video.get(cv::CAP_PROP_FRAME_WIDTH);
frameHeight = video.get(cv::CAP_PROP_FRAME_HEIGHT);
totalFrames = video.get(cv::CAP_PROP_FRAME_COUNT);
video.read(frame);
if(isGrayScale) {
cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
}
frameTexture = gl::Texture::create(fromOcv(frame));
makeGUI();
});
interface->addSeparator();
if(frameTexture) {
interface->addParam("gray scale", &isGrayScale).updateFn([this] {
video.retrieve(frame);
if(isGrayScale) {
cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
}
frameTexture = gl::Texture::create(fromOcv(frame));
makeGUI();
});
interface->addParam("nb of feature",&nbOfFeaturePoints).min(1).max(1000);
if(isGrayScale) {
interface->addButton("get feature points", [this] {
cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
});
}
interface->addSeparator();
interface->addParam("frame",&frameIndex).min(0).max(totalFrames-1).step(1).updateFn([this] {
video.set(cv::CAP_PROP_POS_FRAMES,frameIndex);
video.read(frame);
if(isGrayScale) {
cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
}
frameTexture = gl::Texture::create(fromOcv(frame));
});
interface->addSeparator();
interface->addParam("speed", &frameSpeed).min(1).max(1000).step(1);
interface->addButton("play",[this] {
currentState = PLAY;
makeGUI();
});
if(currentState == PLAY) {
interface->addButton("pause",[this] {
currentState = PAUSE;
makeGUI();
});
}
}
}
Example 14:
void *cameraThread(void *arg){
std::vector<cmdData> cmd;
cv::Mat frame;
cv::namedWindow("Color", CV_WINDOW_AUTOSIZE); //create a window with the name "MyWindow"
cv::namedWindow("Thresholded", CV_WINDOW_AUTOSIZE); //create a window with the name "HSV"
//cv::namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
int initShutter = 242;
//int initShutter = 0;
int shutterVal = initShutter;
int cannyMin = 30;
int blockSize = 89;
int fps = 60;
// Shutter slider
cv::createTrackbar("Shutter","Color",&shutterVal,4095,shutterCB,NULL);
// Canny threshold
cv::createTrackbar("Threshold","Color",&cannyMin,255,NULL,NULL);
cv::createTrackbar("BlockSize","Color",&blockSize,255,NULL,NULL);
cv::Mat colorFrame;
cv::Mat tresholdedFrame;
cv::Mat hsvFrame;
cv::Mat grey,tmp;
cv::Mat contourOutput;
cap.open(CV_CAP_DC1394); // Open the first FireWire camera; OpenCV 2.x uses CV_CAP_DC1394, 3.x+ uses cv::CAP_FIREWIRE
cap.set(CV_CAP_PROP_WHITE_BALANCE_BLUE_U,794); // 736
cap.set(CV_CAP_PROP_WHITE_BALANCE_RED_V,437);
cap.set(CV_CAP_PROP_EXPOSURE,initShutter); // "Shutter" in coriander
cap.set(CV_CAP_PROP_FPS,fps);
cap.set(CV_CAP_PROP_GAMMA,0);
cap.set(CV_CAP_PROP_GAIN,30);
while(runState){
cap >> frame;
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
// Get color image, decode bayer BGGR.
cv::cvtColor(frame,colorFrame,CV_BayerBG2RGB,0);
cv::cvtColor(colorFrame, grey, CV_RGB2GRAY );
// Remove gripper from img
cv::Rect roi = cv::Rect(0,0,640,360);
cv::Mat submatrix = cv::Mat(grey,roi);
//submatrix.setTo(cv::Scalar(255));
cv::threshold(submatrix,tresholdedFrame,cannyMin,255,cv::THRESH_BINARY_INV);
if(blockSize % 2 == 0){
// Adaptive threshold block size MUST be odd.
blockSize = blockSize -1;
}
// cv::adaptiveThreshold(submatrix,tresholdedFrame,255,cv::ADAPTIVE_THRESH_GAUSSIAN_C,cv::THRESH_BINARY_INV,blockSize,0);
cv::Moments mu;
mu = cv::moments(tresholdedFrame,true);
// Find center
cv::Point2f mc = cv::Point2f( mu.m10/mu.m00 , mu.m01/mu.m00 );
// Count non zero pixels. Used for determining if we are screwed (getting large "white" areas.)
cameraError.areaOfObject = cv::countNonZero(tresholdedFrame);
// Draw it - convert to RGB so we can draw on it with colors
cv::cvtColor(tresholdedFrame, tresholdedFrame, CV_GRAY2RGB);
//cv::Mat drawing = cv::Mat::zeros( tresholdedFrame.size(), CV_8UC3 );
cv::Scalar color = cv::Scalar( 0,255,0 );
cv::circle( tresholdedFrame, mc, 5, color, -1, 8, 0 );
//......... (rest of the code omitted) .........
Example 15: main
int main(int argc, char* argv[])
{
signal(SIGINT, quitFunction);
// Simple parsing of the parameters related to the image acquisition
int xRes = 640;
int yRes = 480;
int cameraIndex = 0;
if (argc > 2) {
xRes = std::atoi(argv[1]);
yRes = std::atoi(argv[2]);
}
if (argc > 3) {
cameraIndex = std::atoi(argv[3]);
}
// The source of input images
capture.open(cameraIndex);
if (!capture.isOpened())
{
std::cerr << "Unable to initialise video capture." << std::endl;
return 1;
}
#ifdef OPENCV3
capture.set(cv::CAP_PROP_FRAME_WIDTH, xRes);
capture.set(cv::CAP_PROP_FRAME_HEIGHT, yRes);
#else
capture.set(CV_CAP_PROP_FRAME_WIDTH, xRes);
capture.set(CV_CAP_PROP_FRAME_HEIGHT, yRes);
#endif
cv::Mat inputImage;
// The tag detection happens in the Chilitags class.
chilitags::Chilitags chilitags;
// The detection is not perfect, so if a tag is not detected during one frame,
// the tag briefly disappears, which results in flickering.
// To address this, Chilitags "cheats" by keeping tags for n frames
// at the same position. When a tag disappears for more than 5 frames,
// Chilitags actually removes it.
// Here, we cancel this behaviour to show the raw detection results.
chilitags.setFilter(0, 0.0f);
cv::namedWindow("DisplayChilitags");
// Main loop, exiting when 'q' is pressed
for (; 'q' != (char) cv::waitKey(1) && sRunning; ) {
// Capture a new image.
capture.read(inputImage);
// Start measuring the time needed for the detection
int64 startTime = cv::getTickCount();
// Detect tags on the current image (and time the detection);
// The resulting map associates tag ids (between 0 and 1023)
// to four 2D points corresponding to the corners positions
// in the picture.
chilitags::TagCornerMap tags = chilitags.find(inputImage);
// Measure the processing time needed for the detection
int64 endTime = cv::getTickCount();
float processingTime = 1000.0f*((float) endTime - startTime)/cv::getTickFrequency();
// Now we start using the result of the detection.
// First, we set up some constants related to the information overlaid
// on the captured image
const static cv::Scalar COLOR(255, 0, 255);
// OpenCV can draw with sub-pixel precision using fixed-point coordinates
static const int SHIFT = 16;
static const float PRECISION = 1<<SHIFT;
// We don't want to draw directly on the input image, so we clone it
cv::Mat outputImage = inputImage.clone();
for (const std::pair<int, chilitags::Quad> & tag : tags) {
int id = tag.first;
// We wrap the corner matrix into a datastructure that allows an
// easy access to the coordinates
const cv::Mat_<cv::Point2f> corners(tag.second);
// We start by drawing the borders of the tag
for (size_t i = 0; i < 4; ++i) {
cv::line(
outputImage,
PRECISION*corners(i),
PRECISION*corners((i+1)%4),
#ifdef OPENCV3
COLOR, 1, cv::LINE_AA, SHIFT);
#else
COLOR, 1, CV_AA, SHIFT);
#endif
}
// Other points can be computed from the four corners of the Quad.
// Chilitags are oriented. It means that the points 0,1,2,3 of
// the Quad coordinates are consistently the top-left, top-right,
// bottom-right and bottom-left corners.
//......... (rest of the code omitted) .........