本文整理汇总了C++中VideoWriter::release方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoWriter::release方法的具体用法?C++ VideoWriter::release怎么用?C++ VideoWriter::release使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoWriter的用法示例。
在下文中一共展示了VideoWriter::release方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: processVideo
void HOGDetectorGPU::processVideo(char* ptrNameInput, char* ptrNameOutput){
VideoCapture inputVideo(ptrNameInput);
VideoWriter outputVideo;
Size sizeVideo = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH),(int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));
Mat* ptrMatOut;
outputVideo.open(ptrNameOutput, CV_FOURCC('x','v','i','d'), inputVideo.get(CV_CAP_PROP_FPS), sizeVideo, true);
Mat* ptrMat;
vector<Mat> spl;
while(inputVideo.grab()){
ptrMat = new Mat();
inputVideo >> *ptrMat; // get a new frame from video
Mat finalMat;
split(*ptrMat, spl);
cvtColor(*ptrMat, *ptrMat, CV_BGR2GRAY);
ptrMatOut = detectPeople(ptrMat);
spl[0] = *(ptrMatOut);
spl[1] = *(ptrMatOut);
spl[2] = *(ptrMatOut);
merge(spl, finalMat);
outputVideo << finalMat;
imshow("edges", *(ptrMatOut));
if(waitKey(30) >= 0) break;
//Deletes the processed frame
delete ptrMatOut;
}
outputVideo.release();
}
示例2: writeVideo
// Cuts the source video into clips: for every frame index i flagged in
// fidOI, writes the preceding winsize (= 8 * fps) frames plus frame i
// itself to "data/<basename>_<idx><ext>" as a DIV3-encoded clip.
void RApplication::writeVideo()
{
	char drive[_MAX_DRIVE];
	char dir[_MAX_DIR];
	char fname[_MAX_FNAME];
	char ext[_MAX_EXT];
	// Split the source path so output clips reuse its base name / extension.
	_splitpath_s( Pubvar::videoPath.c_str(), drive, dir, fname, ext);
	string outputPath = "";
	int idx = 0;             // sequential clip number used in the file name
	int winsize = 8 * fps;   // clip window length in frames (8 seconds)
	VideoWriter writer;
	VideoCapture capCut;
	CV_Assert(capCut.open(Pubvar::videoPath));
	Mat cutFrame;
	for (int i = winsize; i < videoLen; ++i)
	{
		if(fidOI[i])
		{
			outputPath = "data/" + (string)fname + "_" + to_string(idx++) + ext;
			// NOTE(review): clip frame size is taken from member `frame`;
			// assumes it matches the frames read back from capCut -- confirm.
			writer = VideoWriter(outputPath, CV_FOURCC('D', 'I', 'V', '3') , fps, frame.size());
			// Rewind to the start of the window and copy winsize+1 frames.
			capCut.set(CV_CAP_PROP_POS_FRAMES, i-winsize);
			for(int j=i-winsize ; j<=i ; j++)
			{
				capCut >> cutFrame;
				writer << cutFrame;
			}
			writer.release();
		}
	}
}
示例3: calcularHistogramaAcumulatOF
/* Computes the accumulated optical-flow histogram over one recorded
 * execution of an activity (frames c_0..c_N with matching depth maps
 * d_0..d_N under `path`), optionally dumping each per-pair result into
 * an uncompressed AVI. Returns the member `repr`. */
Mat HistogramaOF::calcularHistogramaAcumulatOF(String path, int num_imatges, String nom_activitat, int num_repeticio, string ruta, bool video) {
	VideoWriter outputVideo;
	if(video) {
		// 15 fps, fixed 399x240 frame size, raw codec (fourcc 0).
		double fps = 15;
		CvSize mida = cvSize(399, 240);
		String nomVideo = ruta + "/" + nom_activitat + "_acumulat_" + to_string(num_repeticio) + ".avi";
		outputVideo.open(nomVideo, 0, fps, mida, true);
	}
	// Seed the accumulation with frame 0 paired against itself.
	Mat imageA = imread(path + "/c_0.png", IMREAD_COLOR);
	Mat depthA = imread(path + "/d_0.png", IMREAD_GRAYSCALE);
	Mat resultat = calcularHistogramaOF(imageA, imageA, depthA, depthA);
	if(video) outputVideo << resultat;
	// Walk consecutive (k-1, k) frame pairs, accumulating the histogram.
	for(int k = 1; k <= num_imatges; ++k) {
		cout << "Imatge: " << num_repeticio << " - " << k << "/" << num_imatges << endl;
		Mat imageB = imread(path + "/c_" + to_string(k) + ".png", IMREAD_COLOR);
		Mat depthB = imread(path + "/d_" + to_string(k) + ".png", IMREAD_GRAYSCALE);
		resultat = calcularHistogramaOF(imageA, imageB, depthA, depthB);
		if(video) outputVideo << resultat;
		// The current frame becomes the previous one for the next pair.
		imageB.copyTo(imageA);
		depthB.copyTo(depthA);
	}
	if(video) outputVideo.release();
	return repr;
}
示例4: GlobalExits
// Global shutdown: release the recording writer and capture device, mark
// capture idle, stop the worker thread and tear down all OpenCV windows.
VOID inline GlobalExits(VOID)
{
g_writer.release();           // flush and close the output video file
g_cap.release();              // release the camera / input stream
g_captureStat = capture_IDLE; // mark capture as no longer running
SecureEndThread(threadStat);  // NOTE(review): presumably stops/joins the capture thread -- confirm
cv::destroyAllWindows();
g_runningONE = FALSE;         // signal the main loop that everything is down
}
示例5: main
// Renders a running hh:mm:ss.ff counter onto a blank 640x480 grayscale
// canvas at ~30 fps, optionally recording it to ./test.avi when _REC is
// defined; stops once the minute counter advances past zero.
int main(int argc, char* argv[])
{
	// Base canvas and per-frame working copy.
	Mat src = Mat::zeros(480, 640, CV_8UC1);
	Mat tmp;
	// Recording resource (only compiled in when _REC is defined).
#ifdef _REC
	VideoWriter vw = VideoWriter("./test.avi", CV_FOURCC('X','V','I','D'), 30, Size(640, 480), false);
#endif /* _REC */
	char time[256];
	int hh = 0, mm = 0, ss = 0, frame = 0;
	string wname = "window";
	namedWindow(wname);
	moveWindow(wname, 61, 0);
	for (;;) {
		src.copyTo(tmp);
		// Stamp the current counter value onto this frame.
		sprintf_s(time, 256, "%02d:%02d:%02d.%02d", hh, mm, ss, frame);
		putText(tmp, time, Point(100, 100), FONT_HERSHEY_SIMPLEX, 1.5, Scalar::all(200), 1, CV_AA);
		// Advance the 30-frames-per-second counter with cascading rollover.
		++frame;
		if (frame > 29) {
			frame = 0;
			++ss;
			if (ss > 59) {
				ss = 0;
				++mm;
				if (mm > 59) {
					mm = 0;
					++hh;
				}
			}
		}
		// Recording length: stop as soon as the minute counter advances.
		if (0 < mm)
			break;
#ifdef _REC
		vw << tmp;
#endif /* _REC */
		imshow(wname, tmp);
		waitKey(30);
	}
	destroyAllWindows();
#ifdef _REC
	vw.release();
#endif /* _REC */
	tmp.release();
	return 0;
}
示例6: run
void run(){
depth=Mat(480,640,DataType<float>::type);
pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr nuage3(&nuage2);// (new pcl::PointCloud<pcl::PointXYZRGB>);
pcl::PointXYZRGBA point;
it=1000;
pcl::OpenNIGrabber* interface =new pcl::OpenNIGrabber();//creation d'un objet interface qui vient de l'include openni_grabber
//namedWindow( "Display Image", CV_WINDOW_AUTOSIZE );
namedWindow( "Harris Image", CV_WINDOW_AUTOSIZE );
//namedWindow( "Depth Image", CV_WINDOW_AUTOSIZE );
// VideoCapture capture(1);
// Mat frame;
// capture >> frame;
// record=VideoWriter("/home/guerric/Bureau/test.avi", CV_FOURCC('M','J','P','G'), 30, frame.size(), true);
boost::function<void(const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr&)>
f = boost::bind (&ImageVIewer::cloud_cb_, this, _1);
boost::function<void(const boost::shared_ptr<openni_wrapper::Image>&)>
g = boost::bind (&ImageVIewer::image_cb_, this, _1);
boost::function<void(const boost::shared_ptr<openni_wrapper::DepthImage>&)>
h = boost::bind (&ImageVIewer::depth_cb_, this, _1);
interface->registerCallback (f);
interface->registerCallback (g);
interface->registerCallback (h);
interface->start();
//on reste dans cet état d'acquisition tant qu'on ne stoppe pas dans le viewer
while(!viewer.wasStopped()){
boost::this_thread::sleep(boost::posix_time::seconds(1)); //met la fonction en attente pendant une seconde <=> sleep(1) mais plus précis pour les multicores
viewer.showCloud(nuage3);
}
interface->stop();
record.release();
destroyAllWindows();
}
示例7: DumpMovie
// Save the current state of the buffer to a file.
// Writes the frame pair at m_pFrames[m_nWritten] to the depth and/or image
// AVI writers, rotating to new files every m_nMovieLength minutes and
// padding dropped frames with dummies so the timeline stays continuous.
XnStatus MovieMng::DumpMovie(void)
{
	static VideoWriter writer_image;
	static VideoWriter writer_depth;
	Mat image(m_nImageHeight, m_nImageWidth, CV_8UC3);
	Mat depth(m_nDepthHeight, m_nDepthWidth, CV_16UC1);
	Mat detect(m_nDepthHeight, m_nDepthWidth, CV_16UC1);
	Mat detect8UC1(m_nDepthHeight, m_nDepthWidth, CV_8UC1);
	Mat detect8UC3(m_nDepthHeight, m_nDepthWidth, CV_8UC3);
	static double scale = 255.0/65535.0; // 16-bit depth -> 8-bit gray
	static XnUInt64 nLastDepthTime = 0;
	static XnUInt64 nLastImageTime = 0;
	static XnUInt32 nMissedDepthFrames = 0;
	static XnUInt32 nMissedImageFrames = 0;
	static XnUInt32 nDepthFrames = 0;
	static XnUInt32 nImageFrames = 0;
	SingleFrame* pFrames = m_pFrames + m_nWritten;
	// File-rotation check: start new output files once m_nMovieLength
	// minutes have elapsed since the current files were opened.
	XnUInt64 timeStamp = pFrames->depthFrame.Timestamp();
	if (Util::GetTimeDiff(timeStamp, m_fOpenTimestamp) > (m_nMovieLength * 60 * 1000))
	{
		m_fOpenTimestamp = timeStamp;
		writer_image.release();
		// BUGFIX: the depth writer must be rotated too; the original only
		// released writer_image, leaving depth frames in the old file.
		writer_depth.release();
	}
	// File open: (re)create both writers, named after the frame timestamp.
	if(!writer_image.isOpened())
	{
		char timeStr[TIME_STR_LEN];
		g_util.GetTimeStr(pFrames->depthFrame.Timestamp(), sizeof(timeStr), timeStr);
		Util::ChangeIntoNum(timeStr);
		// Build the output file name.
		XnChar strFileName[XN_FILE_MAX_PATH];
		if (m_bDepth)
		{
			sprintf(strFileName, "%s/%s/%s%s.avi", m_strDirName, "depth/movie", "depth_", timeStr);
			writer_depth.open(strFileName, CV_FOURCC('x','v','i','d'), 29.97, Size(m_nDepthWidth,m_nDepthHeight));
		}
		if (m_bImage)
		{
			sprintf(strFileName, "%s/%s/%s%s.avi", m_strDirName, "video/movie", "image_", timeStr);
			writer_image.open(strFileName, CV_FOURCC('x','v','i','d'), 29.97, Size(640,480));
		}
	}
	XnUInt64 nTimestamp;
	if (m_bDepth)
	{
#ifdef LOG_WRITE_ENABLE
		fprintf(mmng_fp, "[%s] Depth Write. buffer:%d FrameId:%d Addr:0x%x\n",
		__FUNCTION__, m_nWritten, pFrames->depthFrame.FrameID(), pFrames);
#endif
		// Dropped-frame check: pad with dummy frames to keep timing aligned.
		nTimestamp = pFrames->depthFrame.Timestamp();
		int missed = Util::CheckMissedFrame("depth", nTimestamp, &nDepthFrames, &nLastDepthTime, &nMissedDepthFrames);
		AddDummyFrame(writer_depth, missed);
		memcpy(depth.data, pFrames->depthFrame.Data(), depth.step*depth.rows);
		detect = depth.clone();
		detect.convertTo(detect8UC1, CV_8UC1, scale, 0.0); // 16UC1 -> 8UC1, needed to store as an image
		cvtColor(detect8UC1, detect8UC3, CV_GRAY2RGB);     // 8UC1 -> 8UC3, needed to store as a movie
		// imshow("depth", depth);
		writer_depth<<detect8UC3;
	}
	if (m_bImage)
	{
#ifdef LOG_WRITE_ENABLE
		fprintf(mmng_fp, "[%s] Image Write. buffer:%d FrameId:%d Addr:0x%x\n",
		__FUNCTION__, m_nWritten, pFrames->imageFrame.FrameID(), pFrames);
#endif
		// Dropped-frame check: pad with dummy frames to keep timing aligned.
		nTimestamp = pFrames->imageFrame.Timestamp();
		int missed = Util::CheckMissedFrame("image", nTimestamp, &nImageFrames, &nLastImageTime, &nMissedImageFrames);
		AddDummyFrame(writer_image, missed);
		memcpy(image.data, pFrames->imageFrame.Data(), image.step*image.rows);
		cvtColor(image, image, CV_BGR2RGB);
		// imshow("image", image);
		writer_image << image;
	}
	return XN_STATUS_OK;
}
开发者ID:beckman16,项目名称:Real_Time_Head_Pose_Estimation_from_Consumer_Depth_Cameras_Client_Server,代码行数:95,代码来源:MovieMng.cpp
示例8:
// RAII cleanup: finalize the recorded tracking video and close the log
// file when the tracker is destroyed.
~GenericClassnameOneTracker9000()
{
tracking_recorder.release(); // flush and close the output video file
logger.close();              // flush and close the log stream
}
示例9: main
//.........这里部分代码省略.........
if(noFoundCount > 1)
{
closest_dist = 1e8;
for( size_t i = 0; i < circles.size(); i++ )
{
Point center_circle(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius_circle = cvRound(circles[i][2]);
if( radius_circle < 6 )
continue;
/*
cv::Rect bBox;
bBox = cv::boundingRect(circles[i]);
Point center;
center.x = bBox.x + bBox.width / 2;
center.y = bBox.y + bBox.height / 2;
*/
int diff_x = center_circle.x - noFoundStartPt.x;
int diff_y = center_circle.y - noFoundStartPt.y;
int distance = diff_x * diff_x + diff_y * diff_y;
if( distance < closest_dist)
{
closest_dist = distance;
best_ball_center = center_circle;
//best_ball_box = bBox;
ballFound = true;
}
}
if(ballFound)
{
//measurement.at<float>(0) = best_ball_center.x;
//measurement.at<float>(1) = best_ball_center.y;
//Mat estimated = KF.correct(measurement);
KF.statePost.at<float>(0) = best_ball_center.x;
KF.statePost.at<float>(1) = best_ball_center.y;
KF.statePost.at<float>(2) = 0;
KF.statePost.at<float>(3) = 0;
prev_box = best_ball_box;
prev_motion = Point2f(0, 0);
noFoundCount = 0;
}
else {
cout << "UNABLE TO CORRECT..." << endl;
}
}
noFoundCount++;
cout << "NO FOUND: " << noFoundCount << endl;
}
// rendering result
line( result, statePt, predictPt, CV_RGB(255,0,255), 2 );
circle( result, predictPt, 2, CV_RGB(255,0,255), 2 );
circle( result, best_ball_center, 2, CV_RGB(255,255,255), 2 );
rectangle( result, best_ball_box, CV_RGB(0,255,0), 2 );
// Optical Flow
/*
for (size_t i = 0; i < optFlow_ball_centers.size(); i++)
{
line( result, prev_ball_centers[i], optFlow_ball_centers[i], CV_RGB(120,70,255), 2 );
circle( result, optFlow_ball_centers[i], 2, CV_RGB(120,70,255), 2 );
}
*/
// Hough
/*
for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
{
Point center(cvRound(circles[circle_i][0]), cvRound(circles[circle_i][1]));
int radius = cvRound(circles[circle_i][2]);
circle( result, center, radius, Scalar(12,12,255), 2 );
}
*/
prev_ball_centers = cur_contour_centers;
imshow("Result Window", result);
outputVideo.write( result );
/* UPDATE FRAME */
cur_frame.copyTo( prev_frame );
/* KEY INPUTS */
int keynum = waitKey(30) & 0xFF;
if(keynum == 113) // press q
break;
else if(keynum == 32) // press space
{
keynum = 0;
while(keynum != 32 && keynum != 113)
keynum = waitKey(30) & 0xFF;
if(keynum == 113)
break;
}
}
inputVideo.release();
outputVideo.release();
}
示例10: main
int main(int argc, char *argv[])
{
int frame_num = 0;
int non_decode_frame =0;
int count = 1, idx =0;
int frame_pos =0;
int p = 0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
MOG2 = createBackgroundSubtractorMOG2(); //MOG2 approach
std::cout<<"Video File "<<argv[1]<<std::endl;
// cv::VideoCapture input_video(argv[1]);
cv::VideoCapture cap("IM-4559-%04d.png");
namedWindow("My_Win",1);
cvSetMouseCallback("My_Win", mouse_click, 0);
sleep(1);
while(cap.grab())
{
cap_img.release();
p++;
if(cap.retrieve(cap_img))
{
imshow("My_Win", cap_img);
if(!got_roi)
{
//Wait here till user select the desire ROI
waitKey(0);
}
else
{
std::cout<<"Got ROI disp prev and curr image"<<std::endl;
std::cout<<"PT1"<<pt1.x<<" "<<pt1.y<<std::endl;
std::cout<<"PT2"<<pt2.x<<" "<<pt2.y<<std::endl;
if(vw.isOpened()){
std::cout<<"VW Opened\n";
}
Mat curr_img_t1;
Mat roi2(cap_img,Rect(pt1, pt2));
Mat curr_imgT = roi2.clone();
MOG2->apply(curr_imgT, fgMaskMOG2);
//get the frame number and write it on the current frame
stringstream ss;
rectangle(curr_imgT, cv::Point(10, 2), cv::Point(100,20),cv::Scalar(255,255,255), -1);
ss << vw.get(CAP_PROP_POS_FRAMES);
string frameNumberString = ss.str();
putText(curr_imgT, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
float morph_size = 2;
Mat grad;
Mat canny_output;
Mat thrld;
Mat element = getStructuringElement(MORPH_RECT,Size(2*morph_size+1,2*morph_size+1), Point(morph_size,morph_size));
morphologyEx(fgMaskMOG2,grad,MORPH_GRADIENT,element, Point(-1,-1),1);
Canny(curr_imgT, canny_output, thresh,thresh*2 , 3);
findContours( canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) );
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
}
namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
threshold(grad,thrld,200,0,3);
char file_name[100];
sprintf(file_name, "final%d.png",p);
//show the current frame and the fg masks
imwrite(file_name,drawing);
imshow("background", fgMaskMOG2);
waitKey(30);
}
}
std::cout<<p<<std::endl;
}
vw.release();
}
示例11: str
//.........这里部分代码省略.........
UI::drawNextDestination(grayImage, env->getDestinations(), nDestino);
threadAttr->data.destino.x = env->getDestination(nDestino)->x;
threadAttr->data.destino.y = env->getDestination(nDestino)->y;
}
// informacion en la pantalla.
string line1 = format("SET-> Pitch: %.3f, Roll: %.3f, Yaw: %.3f, Dist: %d, D_x: %d, D_y: %d",
threadAttr->data.copterSets.pitch, threadAttr->data.copterSets.roll,
threadAttr->data.copterSets.yaw, threadAttr->data.dist_destino.z, threadAttr->data.dist_destino.x, threadAttr->data.dist_destino.y);
string line2 = format(
"GET--> Pitch: %.3f , Roll: %.3f, Yaw: %.3f, Altitude: %d cm, Desired Yaw: %.3f",
threadAttr->data.copterValues.pitch, threadAttr->data.copterValues.roll,
threadAttr->data.copterValues.yaw, threadAttr->data.copterValues.altitude, threadAttr->data.desired_yaw);
string line3 = format(
"Vel-Y: %f, Vel-X: %f, Vel-Z: %f cm/s",
threadAttr->data.copterValues.vy, threadAttr->data.copterValues.vx,
threadAttr->data.copterValues.vz);
string line4 = format("Battery: %d %%, State: %s",
threadAttr->data.copterValues.battery, threadAttr->data.copterValues.ctrl_state_sz.c_str());
string line5 = format("POSICION--> X: %d, Y: %d, Z: %d",
threadAttr->data.tPos.x, threadAttr->data.tPos.y, threadAttr->data.tPos.z);
UI::drawText(grayImage, line1, ar::Point(10, 10), CF_RED);
UI::drawText(grayImage, line2, ar::Point(10, 30), CF_BLUE);
UI::drawText(grayImage, line3, ar::Point(10, 50), CF_BLUE);
UI::drawText(grayImage, line4, ar::Point(10, 70), CF_BLUE);
UI::drawText(grayImage, line5, ar::Point(10, 90), CF_BLACK);
UI::drawMaxView(grayImage, threadAttr->data);
UI::drawWindow("Deteccion", grayImage);
//Genero los graficos sobre PID
Mat graficos(cvSize(imgSize.width, HEIGHT_GRAF), CV_8UC3,
cv::Scalar(255, 255, 255));
UI::drawGraphics(graficos, threadAttr->data);
vp_os_mutex_unlock(&threadAttr->mutex2);
UI::drawWindow("Graficos", graficos);
// compongo dos imagenes
int height = imgSize.height;
if(imgSize.height < HEIGHT_GRAF)
height = HEIGHT_GRAF;
Mat grande(cvSize(imgSize.width * 2, height), CV_8UC3,
cv::Scalar(255, 255, 255));
grayImage.copyTo(
grande.colRange(0, imgSize.width).rowRange(0, imgSize.height));
graficos.copyTo(
grande.colRange(imgSize.width, imgSize.width * 2).rowRange(0,
HEIGHT_GRAF));
// fin de composicion.
env->getVideo()->grabVideoAndData(PROYECT_PATH + "videos/", "avi",
str + "_2", outputVideo2, grande);
// env->getVideo()->grabVideoAndData(PROYECT_PATH + "videos/", "avi",
// str + "_3", outputVideo3, segmentated);
UI::drawWindow("redMorphology", segmentated);
char k = cvWaitKey(bContinue) & 0xff;
vp_os_mutex_lock(&threadAttr->mutex2);
//sem_wait(&threadAttr->mutex);
threadAttr->data.key = k;
vp_os_mutex_unlock(&threadAttr->mutex2);
//sem_post(&threadAttr->mutex);
switch (k)
{
case 27:
case 'q':
case 'Q':
quit = true;
break;
case 's':
case 'S':
bContinue = false;
break;
case 'n':
case 'N':
bContinue = true;
break;
}
}
outputVideo.release();
outputVideo2.release();
outputVideo3.release();
return 0;
}
示例12: main
//.........这里部分代码省略.........
int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
if(!capture) cout << "Capture from CAM " << c << " didn't work" << endl;
}
else if( inputName.size() )
{
image = imread( inputName, 1 );
if( image.empty() )
{
capture = cvCaptureFromAVI( inputName.c_str() );
if(!capture) cout << "Capture from AVI didn't work" << endl;
}
}
else
{
image = imread( "lena.jpg", 1 );
if(image.empty()) cout << "Couldn't read lena.jpg" << endl;
}
// cvNamedWindow( "result", 1 );
if( capture )
{
cout << "In capture ..." << endl;
for(;;)
{
IplImage* iplImg = cvQueryFrame( capture );
frame = cv::cvarrToMat(iplImg);
if( frame.empty() )
break;
if( iplImg->origin == IPL_ORIGIN_TL )
frame.copyTo( frameCopy );
else
flip( frame, frameCopy, 0 );
wframe = detectAndDraw( frameCopy, cascade, nestedCascade, scale, tryflip );
c= waitKey(10);
if( c=='e' )
{
return 0;
goto _cleanup_;
}
}
waitKey(0);
_cleanup_:
cvReleaseCapture( &capture );
vw.release();
}
else
{
cout << "In image read" << endl;
if( !image.empty() )
{
wframe = detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
waitKey(0);
}
else if( !inputName.empty() )
{
/* assume it is a text file containing the
list of the image filenames to be processed - one per line */
FILE* f = fopen( inputName.c_str(), "rt" );
if( f )
{
char buf[1000+1];
while( fgets( buf, 1000, f ) )
{
int len = (int)strlen(buf), c;
while( len > 0 && isspace(buf[len-1]) )
len--;
buf[len] = '\0';
cout << "file " << buf << endl;
image = imread( buf, 1 );
if( !image.empty() )
{
wframe = detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
c = waitKey(0);
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
else
{
cerr << "Aw snap, couldn't read image " << buf << endl;
}
}
fclose(f);
}
}
}
cvDestroyWindow("result");
return 0;
}
示例13: test_video
//.........这里部分代码省略.........
}
// Get the frame rate
double rate = video.get(CV_CAP_PROP_FPS);
cout << " Frame rate : " << rate << endl;
cout << " Input video codec :" << video.get(CV_CAP_PROP_FOURCC);
// initilaize the video writer object to write the video output
std::string outputFile(OUT_Video_File);
VideoWriter writer;
int codec = static_cast<int>(video.get(CV_CAP_PROP_FOURCC));
//int codec = CV_FOURCC('M', 'J', 'P', 'G');
bool isWriterInitialized = false;
int num_of_vehicles = 0;
bool end_of_process = false;
while (!end_of_process)
{
video >> img;
if (img.empty())
break;
draw = img.clone();
Mat cropped;
cv::resize(draw, cropped, Size(720, 560));
Mat temp, temp3;
cvtColor(cropped, temp, COLOR_BGR2GRAY);
/*Mat bgr[3]; //destination array
split(temp3,bgr);//split source
temp = bgr[0]+bgr[2];
*/
if (isWriterInitialized) {
//execute only once
isWriterInitialized = true;
/*writer.open(outputFile,
capture.get(CV_CAP_PROP_FOURCC),
capture.get(CV_CAP_PROP_FPS),
Size(capture.get(CV_CAP_PROP_FRAME_WIDTH),capture.get(CV_CAP_PROP_FRAME_HEIGHT)),
true);*/
writer.open(outputFile, codec, rate, cropped.size(), true);
}
locations.clear();
// Rect(x,y,w,h) w->width=cols;h->rows
// first remove the upper 50% from height Original Cropped =size(720,560)=(cols,rows)
Mat roi = temp(Rect(0, temp.rows*0.5, temp.cols, temp.rows - temp.rows*0.5));
//size(roi) = size(720,280)
//cout<<"roi.size() = "<<roi.size()<<endl;
int y_offset = temp.rows*0.5;
//again crop the lower 10 % to remove the images near dashboard-> remove false positives
roi = roi(Rect(0, 0, roi.cols, roi.rows - 100));
//cout<<"roi.size() = "<<roi.size()<<endl;
//no offset required as this is the lower row colums.
//hog.detectMultiScale(roi, locations);
//hog.detectMultiScale(roi, locations, 1, Size(50, 50), Size(32, 32), 1, 2);//对图片进行多尺度行人检测
hog.detectMultiScale(roi, locations, 0.00, Size(4, 8), Size(0, 0), 1.05, 2);//less false positive
//hog.detectMultiScale(roi, locations, 0.00, Size(8,8), Size(0,0), 1.05, 2);// less true negative(missed)
// add the offset
std::vector<Rect>::iterator it = locations.begin();
std::vector<Rect>::iterator itend = locations.end();
vector<Rect> actuallocations;
bool isVehicle = false;
for (; it != itend; it++)
{
Rect current = *it;
//cout<<" Rect current = "<< current<<endl;
//cout<<" roi size= "<<roi.size()<<endl;
Mat roi2Check = roi(Rect(current.x, current.y, current.width, current.height));//define a roi of 50x50
//cout<<" roi2Check size= "<<roi2Check.size()<<endl;
isVehicle = checkIfpatchIsVehicle(roi2Check);
if (isVehicle)
actuallocations.push_back(Rect(current.x, current.y + y_offset, current.width, current.height));
}
if (0 != actuallocations.size())
draw_locations(cropped, actuallocations, Scalar(0, 255, 0));
imshow(WINDOW_NAME, cropped);
if (save_video)
writer.write(cropped);
//wait infinite fro keypress
key = (char)waitKey(3);
if (27 == key)
end_of_process = true;
}
// Close the video file.
// Not required since called by destructor
writer.release();
video.release();
}
示例14: main
//.........这里部分代码省略.........
outfile.open( "data.txt" );
VideoCapture cap( argv[1] );
if( cap.isOpened() == false && outfile.is_open() ) {
cerr << "Cannot open file" << endl;
return -1;
}
double frameWidth = cap.get( CV_CAP_PROP_FRAME_WIDTH );
double frameHeight = cap.get( CV_CAP_PROP_FRAME_HEIGHT );
double videoFPS = cap.get( CV_CAP_PROP_FPS );
VideoWriter vw;
if( argc == 3 ) {
bool open = vw.open( argv[2], CV_FOURCC('m', 'p', '4', 'v'), videoFPS, Size((int)frameWidth, (int)frameHeight));
if( false == open || false == vw.isOpened() ) {
cerr << "Cannot open file " << argv[2] << endl;
return -1;
}
}
cout << " Width: " << frameWidth << endl;
cout << " Height: " << frameHeight << endl;
cout << " FPS: " << videoFPS << endl;
int indicatorY = (int) ((float) frameHeight * 0.1);
int indicatorX = (int) ((float) frameWidth * 0.8);
namedWindow(WIN_TITLE);
resizeWindow(WIN_TITLE, frameHeight, frameWidth);
Mat currFrame, cloneFrame;
Vec3b currPixel;
Vec3b filterPixel;
unsigned long sumR, sumG, sumB;
unsigned long frameNo = 0;
unsigned long lastR = 0;
while( cap.read(currFrame) ) {
sumR = sumG = sumB = 0;
cloneFrame = currFrame.clone();
for( int i = 0; i < frameHeight; i++ ) {
for( int j = 0; j < frameWidth; j++ ) {
currPixel = currFrame.at<Vec3b>(Point(j, i));
sumR += currPixel[2];
if( cloneFrame.at<Vec3b>(Point(j, i))[0] + filterPixel[0] > 255 ) {
cloneFrame.at<Vec3b>(Point(j, i))[0] = 255;
} else {
cloneFrame.at<Vec3b>(Point(j, i))[0] += filterPixel[0];
}
if( cloneFrame.at<Vec3b>(Point(j, i))[1] + filterPixel[1] > 255 ) {
cloneFrame.at<Vec3b>(Point(j, i))[1] = 255;
} else {
cloneFrame.at<Vec3b>(Point(j, i))[1] += filterPixel[1];
}
if( cloneFrame.at<Vec3b>(Point(j, i))[2] + filterPixel[2] > 255 ) {
cloneFrame.at<Vec3b>(Point(j, i))[2] = 255;
} else {
cloneFrame.at<Vec3b>(Point(j, i))[2] += filterPixel[2];
}
}
}
vw.write( cloneFrame );
outfile << frameNo;
outfile << " " << sumR;
outfile << endl;
++frameNo;
if( lastR != 0 ) {
float ratio = (float) sumR / (float) lastR;
if( ratio > 1.08 ) {
showIndicator(indicatorX, indicatorY, cloneFrame);
cout << "Dot " << frameNo << " " << indicatorX << ":" << indicatorY;
cout << " ratio " << ratio << endl;
if( true == filterShouldChange(frameNo) ) {
filterPixel = getRandomFilter();
}
} else {
lastR = sumR;
}
} else {
lastR = sumR;
}
imshow(WIN_TITLE, cloneFrame);
if( waitKey(29) >= 0 ) break;
}
outfile.close();
cap.release();
if( argc == 3) {
vw.release();
}
return 0;
}
示例15: main
// Streams grayscale webcam frames to a single TCP client on port 8080 while
// also recording them to rec.avi. Each frame's raw pixel bytes (640x480,
// 8-bit single channel) are sent as-is; the loop ends when send() fails,
// i.e. the client disconnected.
int main(int argc, char** argv)
{
	//**********************************************************************
	//-- Network code ------------------------------------------------------
	//**********************************************************************
	int localSocket;
	int remoteSocket;
	int port = 8080;
	struct sockaddr_in localAddr;
	struct sockaddr_in remoteAddr;
	int addrLen = sizeof(struct sockaddr_in);
	localSocket = socket(AF_INET , SOCK_STREAM , 0);
	if (localSocket == -1)
		exit(1);
	localAddr.sin_family = AF_INET;
	localAddr.sin_addr.s_addr = INADDR_ANY;
	localAddr.sin_port = htons(port);
	if (bind(localSocket, (struct sockaddr *)&localAddr, sizeof(localAddr)) < 0)
		exit(1);
	// Listening; accept exactly one incoming client.
	listen(localSocket , 1);
	remoteSocket = accept(localSocket, (struct sockaddr *)&remoteAddr, (socklen_t*)&addrLen);
	if (remoteSocket < 0)
		exit(1);
	//**********************************************************************
	//-- OpenCV code -------------------------------------------------------
	//**********************************************************************
	VideoCapture cap(0); // "0": open the default camera
	Mat img, imgGray;
	img = Mat::zeros(480 , 640, CV_8U);
	// Frame data must be continuous in memory so one send() covers it all.
	// (The original performed this check twice; once suffices.)
	if (!img.isContinuous())
	{
		img = img.clone();
		imgGray = img.clone();
	}
	// Bytes per transmitted frame: 640*480 single-channel, matching imgGray.
	int imgSize = img.total() * img.elemSize();
	int bytes = 0;
	VideoWriter videoOut;
	// MPEG-1 codec, 20 fps, 640x480, isColor = false (grayscale recording).
	videoOut.open("rec.avi", CV_FOURCC('P','I','M','1'),
	              20, Size(640,480), false);
	while(1)
	{
		cap >> img;
		resize(img, img, Size(640, 480));
		// Video processing: convert to grayscale before recording/sending.
		cvtColor(img, imgGray, CV_BGR2GRAY);
		videoOut << imgGray;
		// Send the processed frame; a failed send means the client is gone.
		if ((bytes = send(remoteSocket, imgGray.data, imgSize, 0)) < 0)
			break;
	}
	videoOut.release();
	close(remoteSocket);
	close(localSocket); // BUGFIX: the listening socket was leaked in the original
	return 0;
}