本文整理汇总了C++中VideoCapture::retrieve方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoCapture::retrieve方法的具体用法?C++ VideoCapture::retrieve怎么用?C++ VideoCapture::retrieve使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoCapture
的用法示例。
在下文中一共展示了VideoCapture::retrieve方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: update
bool update(void) {
bool result;
if(useVideo == 0) {
result = cap.grab();
cap.retrieve(rgbImg, CV_CAP_OPENNI_BGR_IMAGE);
cap.retrieve(depthMap, CV_CAP_OPENNI_DEPTH_MAP);
}
else if(useVideo == 1) {
result = cap.grab();
cap.retrieve(rgbImg);
depthMap = Mat();
}
else if(useVideo == 2) {
result = (cap.grab() && cap1.grab());
if(result) {
cap.retrieve(rgbImg);
cap1.retrieve(depthImg);
for(int i=0; i<depthImg.rows; i++) {
for(int j=0; j<depthImg.cols; j++) {
depthMap.at<ushort>(i,j) = (int(depthImg.at<Vec3b>(i,j)[0])*256 + int(depthImg.at<Vec3b>(i,j)[1]));
}
}
}
}
return result;
}
示例2: grab_frame
// Grabs one frame into img, opening the camera (or the given video file) on
// the first call. Returns 1 while frames keep coming, 0 on open/grab failure
// or when any key was pressed.
int grab_frame(VideoCapture& capture, Mat& img, char* filename) {
    // camera/image setup — runs once, guarded by the global cv_init_ flag
    if (!cv_init_) {
        if (filename != NULL) {
            capture.open(filename);
        } else {
            float scale = 0.52;//0.606;
            int w = 640 * scale;
            int h = 480 * scale;
            capture.open(0); //try to open
            capture.set(CV_CAP_PROP_FRAME_WIDTH, w); capture.set(CV_CAP_PROP_FRAME_HEIGHT, h);
        }
        if (!capture.isOpened()) { cerr << "open video device fail\n" << endl; return 0; }
        capture.grab();
        capture.retrieve(img);
        if (img.empty()) { cout << "load image fail " << endl; return 0; }
        printf(" img = %d x %d \n", img.rows, img.cols);
        cv_init_ = 1;
    }
    // get frames — fixed: grab/retrieve failures used to be ignored, which let
    // imshow() run on a stale or empty Mat once the stream ended
    if (!capture.grab() || !capture.retrieve(img) || img.empty()) { return 0; }
    imshow("cam", img);
    if (waitKey(10) >= 0) { return 0; }
    else { return 1; }
}
示例3: main
// Opens an MJPEG network stream, sets up a UDP socket toward the same host,
// then runs a color-detection loop over incoming frames until the stream ends.
int main(int argc, char* argv[])
{
    VideoCapture cap; // remote MJPEG stream
    cap.open("http://192.168.1.139:1234/?action=stream");
    if (!cap.isOpened()) // if not success, exit program
    {
        cout << "Cannot open the video cam" << endl;
        return -1;
    }
    // UDP socket state lives in globals; used elsewhere to reach port 33333
    clientSocket = socket(PF_INET, SOCK_DGRAM, 0);
    serverAddr.sin_family = AF_INET;
    serverAddr.sin_port = htons(33333);
    serverAddr.sin_addr.s_addr = inet_addr("192.168.1.139");
    memset(serverAddr.sin_zero, '\0', sizeof serverAddr.sin_zero);
    addr_size = sizeof serverAddr;
    while (1)
    {
        // fixed: grab/retrieve results were ignored, so a dropped stream kept
        // re-processing the last (or an empty) frame forever
        if (!cap.grab() || !cap.retrieve(frame))
            break;
        adjusted_color_img = brightness_and_contrast(frame);
        color_detection(adjusted_color_img);
        imshow("view", frame);
        waitKey(20);
        usleep(2000);
    }
    return 0;
}
示例4: main
// ROS node that displays a generated aruco board and runs marker detection on
// live camera frames until the node is shut down or the camera stops.
int main(int argc, char *argv[]){
    ros::init(argc,argv,"aruco_test");
    ros::NodeHandle nh;
    namedWindow("detection_result");
    startWindowThread();
    namedWindow("board");
    startWindowThread();
    /* Mat distCoeffs = (Mat_<float>(1,5) << 0.182276,-0.533582,0.000520,-0.001682,0.000000);
    Mat camMatrix = (Mat_<float>(3,3) <<
    743.023418,0.000000,329.117496,
    0.000000,745.126083,235.748102,
    0.000000,0.000000,1.000000);*/
    VideoCapture input;
    input.open(0);
    Mat boardImg;
    createBoard(boardImg);
    imshow("board",boardImg);
    while(input.grab() && ros::ok()){
        Mat image,result;
        // fixed: retrieve() result was ignored; skip the frame if decoding failed
        if(!input.retrieve(image))
            continue;
        process(image,result);
        imshow("detection_result",result);
    }
    return 0;
}
示例5: getDepthImage
int getDepthImage(VideoCapture capture, Mat &depth_image){
//depth image
if( !capture.grab() )
{
cout << "Can not grab images." << endl;
return -1;
}
else
{
if( capture.retrieve( depth_image, CAP_OPENNI_DEPTH_MAP ) )
{
const float scaleFactor = 0.05f;
//Mat show;
depth_image.convertTo( depth_image, CV_8UC1, scaleFactor );
//imshow( "depth map", depth_image );
}
}
return 0;
}
示例6: LOGD
// JNI bridge for VideoCapture.retrieve(Mat): decodes the last grabbed frame
// into the native Mat and rethrows any cv::Exception as a Java exception.
JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJ
(JNIEnv* env, jclass, jlong self, jlong image_nativeObj)
{
    try {
#ifdef DEBUG
        LOGD("highgui::VideoCapture_n_1retrieve__JJ()");
#endif // DEBUG
        VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL
        Mat& image = *((Mat*)image_nativeObj);
        bool _retval_ = me->retrieve( image );
        return _retval_;
    } catch(const cv::Exception& e) {  // fixed: was caught by value (needless copy, risks slicing)
#ifdef DEBUG
        LOGD("highgui::VideoCapture_n_1retrieve__JJ() catched cv::Exception: %s", e.what());
#endif // DEBUG
        jclass je = env->FindClass("org/opencv/core/CvException");
        if(!je) je = env->FindClass("java/lang/Exception");
        env->ThrowNew(je, e.what());
        return 0;
    } catch (...) {
#ifdef DEBUG
        LOGD("highgui::VideoCapture_n_1retrieve__JJ() catched unknown exception (...)");
#endif // DEBUG
        jclass je = env->FindClass("java/lang/Exception");
        env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1retrieve__JJ()}");
        return 0;
    }
}
示例7: vIdle
// GLUT idle callback: when capturing is enabled, grabs a frame, undistorts it,
// runs marker + board detection, and prepares the resized image and mask used
// by the GL draw callback. Always schedules a redisplay.
void vIdle() {
if (TheCaptureFlag) {
// capture the next camera frame
TheVideoCapturer.grab();
TheVideoCapturer.retrieve(TheInputImage);
TheUndInputImage.create(TheInputImage.size(), CV_8UC3);
// by default OpenCV works in BGR, so convert to RGB, which OpenGL prefers
cv::cvtColor(TheInputImage, TheInputImage, CV_BGR2RGB);
// remove lens distortion from the image
cv::undistort(TheInputImage, TheUndInputImage, TheCameraParams.CameraMatrix,
TheCameraParams.Distorsion);
// detect individual markers
MDetector.detect(TheUndInputImage, TheMarkers);
// detect the marker board; .second holds the detection likelihood/result
TheBoardDetected.second = TheBoardDetector.detect(
TheMarkers, TheBoardConfig, TheBoardDetected.first, TheCameraParams, TheMarkerSize);
// resize the image to the size of the GL window
cv::resize(TheUndInputImage, TheResizedImage, TheGlWindowSize);
// create a synthetic mask (a simple rectangle) just to demonstrate
// OpenGL rendering with a mask
TheMask = createSyntheticMask(TheResizedImage); // same size as the resized
// image, i.e. the size of the OpenGL window
}
glutPostRedisplay();
}
示例8: video_thread_CL
void video_thread_CL(void* pParams)
{
FaceDetector *faceDetector;
if (threadUseCL){
faceDetector = (FaceDetectorCL*)pParams;
}
else{
faceDetector = (FaceDetectorCpu*)pParams;
}
std::string name = faceDetector->name();
//HAAR_EYE_TREE_EYEGLASSES_DATA
//HAAR_EYE_DATA
//HAAR_FRONT_FACE_DEFAULT_DATA
//LBP_FRONTAL_FACE
//LBP_PROFILE_FACE
faceDetector->load(HAAR_FRONT_FACE_DEFAULT_DATA);
VideoCapture videoCapture;
cv::Mat frame, frameCopy, image;
videoCapture.open(faceDetector->videoFile().c_str());
if (!videoCapture.isOpened()) { cout << "No video detected" << endl; return; }
if (imshowFlag) { cv::namedWindow(name.c_str(), 1); }
if (videoCapture.isOpened())
{
cout << "In capture ..." << name.c_str() << endl;
while (videoCapture.grab())
{
if (!videoCapture.retrieve(frame, 0)) { break; }
faceDetector->setSrcImg(frame, 1);
faceDetector->doWork();
if (imshowFlag){ cv::imshow(name.c_str(), faceDetector->resultMat()); }
std::vector<cv::Rect> &faces_result = faceDetector->getResultFaces();
std::cout << "face --" << name.c_str() << std::endl;
for (int i = 0; i < faces_result.size(); ++i){
std::cout << faces_result.at(i).x << ", " << faces_result.at(i).y << std::endl;
}
if (waitKey(10) >= 0){
videoCapture.release();
break;
}
Sleep(1);
}
}
if (imshowFlag) { cvDestroyWindow(name.c_str()); }
finishTaskFlag++;
_endthread();
return;
}
示例9: threadGrab0
void threadGrab0(){
cap0.grab();
cap0.retrieve(img0);
Mat Tmp = img0.t(); //Rotate Image
flip(Tmp,img0,0);
leftImgs.push_back(img0);
//outputVideocap0.write(img0);
}
示例10: threadGrab1
void threadGrab1(){
cap1.grab();
cap1.retrieve(img1);
Mat Tmp = img1.t(); //Rotate Image
flip(Tmp,img1,1);
rightImgs.push_back(img1);
//outputVideocap1.write(img1);
}
示例11: getFrames
// Grabs one synchronized frame set from an Intel PerC capture and returns the
// color image, depth map and UV map bundled in an ImagePacket.
// fixed: capture is taken by reference — copying a VideoCapture on every call
// is wasteful and may disturb device state.
ImagePacket getFrames(VideoCapture &capture) {
	Mat depthMap;
	Mat color;
	Mat uvMap;
	capture.grab();
	capture.retrieve( depthMap, CV_CAP_INTELPERC_DEPTH_MAP );
	// fixed: guard the transposes — a failed retrieve leaves the Mat empty
	if (!depthMap.empty())
		transpose(depthMap, depthMap);
	capture.retrieve(color, CV_CAP_INTELPERC_IMAGE );
	if (!color.empty())
		transpose(color, color);
	capture.retrieve(uvMap, CV_CAP_INTELPERC_UVDEPTH_MAP);
	//transpose(uvMap, uvMap);
	return ImagePacket(color, depthMap, uvMap);
}
示例12: checkFeed
/**
* parameter cam - Camera that will be used
* parameter src - Source matrix that will be shown
*/
void checkFeed(VideoCapture cam, Mat src){
if(!cam.isOpened()){
cout << "Camera not detected.";
}
if (cam.grab()){
if (!cam.retrieve(src)){
cout << "Could not retrieve.";
}
}
}
示例13: getNextFrame
// Advances 'video' by 'jump' frames, decoding the last one into 'frame' and
// bumping 'framenum' accordingly. Relies on the global flag 'firstGot' to tell
// the first call on a video apart from later ones; the flag resets itself when
// the video runs out of frames, so the next video starts fresh.
// Returns true while more frames are available, false once the video ended.
bool getNextFrame(VideoCapture& video, Mat& frame, int& framenum, int jump = 1)
{
// firstGot should regulate itself so it'll reset when a video runs out of frames
bool moreFrames = true;
if(firstGot) // not first frame
{
bool val1 = true;
// skip ahead: grab() jump times, remembering if any grab failed
for(int i = 0; i < jump; i++)
if(!video.grab())
val1 = false;
bool val2 = video.retrieve(frame);
framenum += jump;
if(!val1 || !val2)
{
firstGot = false; // this means video ended
moreFrames = false;
}
}
else // first frame
{
bool val = video.grab();
firstGot = true;
if(!val)
{
printf("first frame val failed on grab\n");
firstGot = false;
moreFrames = false;
}
// NOTE(review): retrieve is attempted even if the grab above failed —
// it will simply fail too and keep moreFrames false
val = video.retrieve(frame);
if(!val)
{
printf("first frame failed on retreive\n");
firstGot = false;
moreFrames = false;
}
framenum++;
}
return moreFrames;
}
示例14: retrieve
// Decodes the most recently grabbed frame from camera camNum_ into the member
// buffer frame_ and hands a (shared-header) copy back through *image.
// Returns false when no frame could be decoded.
bool retrieve(Mat *image)
{
	const bool ok = camera_.retrieve(frame_); // decode the frame grabbed earlier
	if (!ok)
	{
		printf("Cannot grab a frame from video stream in camera %d \n", camNum_);
		return false;
	}
	*image = frame_;
	printf("Grabbed a frame from video stream in camera %d \n", camNum_);
	return true;
}
示例15: main
// Spawns the image-taking thread, saves the first captured photo, waits for
// user input, then retrieves and saves a second frame.
int main(){
	pthread_t thread;
	// fixed: the pthread_create result was ignored; a failed spawn would have
	// left 'mat' untouched while we wrote it to disk anyway
	int threadSuccess = pthread_create(&thread, NULL, imageTakingThread, NULL);
	if (threadSuccess != 0) {
		cout << "failed to create image taking thread\n";
		return 1;
	}
	cout << "got one photo\n";
	// NOTE(review): there is no synchronization with the capture thread here —
	// 'mat' may not be filled yet when it is written out; confirm intent
	imwrite("1st.jpg",mat);
	int a;
	cin >> a; // block until the user enters something
	Mat matToProcess;
	vidCap.retrieve(matToProcess);
	imwrite("2nd.jpg",matToProcess);
	cout<<"done"<<endl;
	return 0;
}