This page collects typical usage examples of the C++ VideoSource class. If you have been wondering what VideoSource is for, how to use it, or where to find real-world examples, the curated class examples below may help.
The following 15 code examples of the VideoSource class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: ofLogNotice
BaseSource* MediaServer::loadVideo(string& path) {
    VideoSource* videoSource = NULL;
    // Check if this video is already loaded
    bool isVideoLoaded = false;
    if (loadedSources.count(path)) {
        videoSource = static_cast<VideoSource*>(loadedSources[path]);
        isVideoLoaded = true;
    }
    // If it is already loaded
    if (isVideoLoaded) {
        // Increase the reference count of this source
        videoSource->referenceCount++;
        std::stringstream refss;
        refss << "Current reference count for " << path << " = " << videoSource->referenceCount;
        ofLogNotice("MediaServer") << refss.str();
        // Notify objects registered to the onVideoLoaded event
        std::stringstream ss;
        ss << "Video " << path << " already loaded";
        ofLogNotice("MediaServer") << ss.str();
        ofNotifyEvent(onVideoLoaded, path, this);
        return videoSource;
    }
    // Else load fresh
    videoSource = new VideoSource();
    videoSource->loadVideo(path);
    loadedSources[path] = videoSource;
    // Set the reference count of this video path to 1
    //referenceCount[path] = 1;
    std::stringstream refss;
    refss << "Initialized reference count of " << path << " to " << videoSource->referenceCount;
    ofLogNotice("MediaServer") << refss.str();
    ofNotifyEvent(onVideoLoaded, path, this);
    return videoSource;
}
Example 2: unloadVideo
void MediaServer::unloadVideo(string& path) {
    VideoSource* videoSource = static_cast<VideoSource*>(getSourceByPath(path));
    // Decrease the reference count of the video
    //referenceCount[path]--;
    videoSource->referenceCount--;
    // Unload only if the reference count is 0 or less
    if (videoSource->referenceCount > 0) {
        ofLogNotice("MediaServer") << "Not unloading video as it is being referenced elsewhere";
        return;
    }
    // Reference count 0 or less, let's unload the video
    ofLogNotice("MediaServer") << "Removing video " << path;
    // Destroy the video source
    if (loadedSources.count(path)) {
        ofLogNotice("MediaServer") << "Source count before video removal: " << loadedSources.size() << endl;
        videoSource->clear();
        std::map<std::string, BaseSource*>::iterator it = loadedSources.find(path);
        delete it->second;
        loadedSources.erase(it);
        ofLogNotice("MediaServer") << "Source count after video removal: " << loadedSources.size() << endl;
        ofNotifyEvent(onVideoUnloaded, path, this);
        return;
    }
    // Something is wrong here, we should be out of the routine by now
    std::stringstream failss;
    failss << "Failed to remove video source: " << path;
    ofLogFatalError("MediaServer") << failss.str();
    std::exit(EXIT_FAILURE);
}
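Examples 1 and 2 together form a simple reference-counted cache: repeated loads of the same path return the cached source, and the source is destroyed only once every load has been matched by an unload. A minimal usage sketch, assuming only the MediaServer API shown above (the file path is hypothetical):

std::string path = "movies/clip.mov";   // hypothetical path
MediaServer server;
BaseSource* a = server.loadVideo(path); // fresh load, reference count = 1
BaseSource* b = server.loadVideo(path); // cached, same pointer, count = 2
server.unloadVideo(path);               // count = 1, video stays loaded
server.unloadVideo(path);               // count = 0, video is destroyed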
Example 3: main
int main(int argc, char *argv[])
{
    VideoSource videoSrc;
    videoSrc.init();
    QApplication a(argc, argv);
    CameraWindow w;
    w.show();
    CannyFilter cannyFilter;
    cannyFilter.setLowThreshold(50);
    cannyFilter.setHighThreshold(100);
    TextDetectElement textDetect;
    QObject::connect(&videoSrc, SIGNAL(pushRawFrame(Mat&)), &cannyFilter, SLOT(onPushRawFrame(Mat&)));
    QObject::connect(&videoSrc, SIGNAL(pushRawFrame(Mat&)), &textDetect, SLOT(onPushRawFrame(Mat&)));
    QObject::connect(&cannyFilter, SIGNAL(pushEdgeImage(Mat&)), &textDetect, SLOT(onPushEdgeImage(Mat&)));
    QObject::connect(&textDetect, SIGNAL(pushResultImage(Mat&)), &w, SLOT(onPushImage(Mat&)));
    // videoSrc.addPushListener(cannyFilter);
    // cannyFilter.addPushListener(textDetect);
    // textDetect.addPushListener(w);
    videoSrc.start();
    return a.exec();
}
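Each stage of this pipeline is a QObject that receives frames through a slot and forwards results through a signal. A minimal sketch of such an element, assuming Qt and OpenCV (the GrayFilter class and its signal name are hypothetical, for illustration only):

#include <QObject>
#include <opencv2/imgproc.hpp>

using cv::Mat;

class GrayFilter : public QObject {
    Q_OBJECT // required for signals/slots (processed by moc)
signals:
    void pushGrayImage(Mat &image);   // consumed by downstream elements
public slots:
    void onPushRawFrame(Mat &frame) { // connect to VideoSource's pushRawFrame
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        emit pushGrayImage(gray);
    }
private:
    Mat gray;                         // reused output buffer
};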
Example 4:
SearchParams *MediaView::getSearchParams() {
    VideoSource *videoSource = playlistModel->getVideoSource();
    if (videoSource && videoSource->metaObject()->className() == QLatin1String("YTSearch")) {
        YTSearch *search = qobject_cast<YTSearch *>(videoSource);
        return search->getSearchParams();
    }
    return nullptr;
}
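Note that the className() comparison matches the class YTSearch exactly, while qobject_cast alone would also accept YTSearch subclasses. When that distinction does not matter, the check can be folded into the cast; a sketch of the shorter equivalent, assuming YTSearch declares Q_OBJECT:

// qobject_cast returns nullptr both for a null input and for any
// VideoSource that is not a YTSearch, so one test covers both cases.
if (YTSearch *search = qobject_cast<YTSearch *>(playlistModel->getVideoSource()))
    return search->getSearchParams();
return nullptr;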
Example 5: while
void MediaView::stop() {
    stopped = true;
    while (!history.isEmpty()) {
        VideoSource *videoSource = history.takeFirst();
        if (!videoSource->parent()) delete videoSource;
    }
    playlistModel->abortSearch();
    videoAreaWidget->clear();
    videoAreaWidget->update();
    errorTimer->stop();
    playlistView->selectionModel()->clearSelection();
    if (downloadItem) {
        downloadItem->stop();
        delete downloadItem;
        downloadItem = 0;
        currentVideoSize = 0;
    }
    The::globalActions()->value("refine-search")->setChecked(false);
    updateSubscriptionAction(0, false);
#ifdef APP_ACTIVATION
    demoTimer->stop();
#endif
    foreach (QAction *action, currentVideoActions)
        action->setEnabled(false);
    QAction *a = The::globalActions()->value("download");
    a->setEnabled(false);
    a->setVisible(false);
#ifdef APP_PHONON
    mediaObject->stop();
#endif
    currentVideoId.clear();
#ifndef APP_PHONON_SEEK
    QSlider *slider = MainWindow::instance()->getSlider();
    slider->setEnabled(false);
    slider->setValue(0);
#else
    Phonon::SeekSlider *slider = MainWindow::instance()->getSeekSlider();
#endif
    if (snapshotSettings) {
        delete snapshotSettings;
        snapshotSettings = 0;
    }
}
Example 6: setup
void VideoRate::setup(VideoSource & _source, float fps){
    source = &_source;
    ofAddListener(source->newFrameEvent,this,&VideoRate::newVideoFrame);
    setFps(fps);
    front = _source.getNextVideoFrame();
    //startThread(true,false);
    ofAddListener(ofEvents().update,this,&VideoRate::glThreadUpdate);
}
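ofAddListener registers a member function as the handler for an openFrameworks event, so VideoRate must declare a callback whose signature matches the argument type of newFrameEvent. A minimal sketch of what such a handler could look like; the body is an assumption for illustration, not the library's actual implementation:

// Invoked by the event system each time the source publishes a frame.
void VideoRate::newVideoFrame(VideoFrame &frame){
    front = frame; // keep the newest frame; it is re-emitted at the configured fps
}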
Example 7: Q_UNUSED
void MediaView::setVideoSource(VideoSource *videoSource, bool addToHistory, bool back) {
    Q_UNUSED(back);
    stopped = false;
    errorTimer->stop();
    // qDebug() << "Adding VideoSource" << videoSource->getName() << videoSource;
    if (addToHistory) {
        int currentIndex = getHistoryIndex();
        if (currentIndex >= 0 && currentIndex < history.size() - 1) {
            while (history.size() > currentIndex + 1) {
                VideoSource *vs = history.takeLast();
                if (!vs->parent()) {
                    qDebug() << "Deleting VideoSource" << vs->getName() << vs;
                    vs->deleteLater();
                }
            }
        }
        history.append(videoSource);
    }
#ifdef APP_EXTRA
    if (history.size() > 1)
        Extra::slideTransition(playlistView->viewport(), playlistView->viewport(), back);
#endif
    playlistModel->setVideoSource(videoSource);
    if (media->state() == Media::StoppedState) {
        QSettings settings;
        if (settings.value("manualplay", false).toBool()) {
            videoAreaWidget->showPickMessage();
        }
    }
    sidebar->showPlaylist();
    sidebar->getRefineSearchWidget()->setSearchParams(getSearchParams());
    sidebar->hideSuggestions();
    sidebar->getHeader()->updateInfo();
    SearchParams *searchParams = getSearchParams();
    bool isChannel = searchParams && !searchParams->channelId().isEmpty();
    playlistView->setClickableAuthors(!isChannel);
}
Example 8: while
void MediaView::stop() {
    stopped = true;
    while (!history.isEmpty()) {
        VideoSource *videoSource = history.takeFirst();
        // Don't delete videoSource in the Browse view
        if (!videoSource->parent()) {
            videoSource->deleteLater();
        }
    }
    playlistModel->abortSearch();
    videoAreaWidget->clear();
    videoAreaWidget->update();
    errorTimer->stop();
    playlistView->selectionModel()->clearSelection();
    MainWindow::instance()->getAction("refineSearch")->setChecked(false);
    updateSubscriptionAction(nullptr, false);
#ifdef APP_ACTIVATION
    demoTimer->stop();
#endif
    for (QAction *action : currentVideoActions)
        action->setEnabled(false);
    QAction *a = MainWindow::instance()->getAction("download");
    a->setEnabled(false);
    a->setVisible(false);
    media->stop();
    media->clearQueue();
    currentVideoId.clear();
#ifdef APP_SNAPSHOT
    if (snapshotSettings) {
        delete snapshotSettings;
        snapshotSettings = nullptr;
    }
#endif
}
Example 9: printf
void VideoBuffer::setup(VideoSource & _source, int size, bool allocateOnSetup){
    source=&_source;
    totalFrames=0;
    maxSize = size;
    VideoSource::width = _source.getWidth();
    VideoSource::height = _source.getHeight();
    if(allocateOnSetup){
        printf("VideoBuffer:: allocating on setup %d %d : ",VideoSource::getWidth(),VideoSource::getHeight());
        for(int i=0;i<size;i++){
            VideoFrame videoFrame = VideoFrame::newVideoFrame(source->getNextVideoFrame().getPixelsRef());
            //videoFrame.getTextureRef();
            newVideoFrame(videoFrame);
            printf("%d-",i);
        }
        printf("//\n");
    }
    resume();
    microsOneSec=-1;
}
Example 10: setup
void VideoBuffer::setup(VideoSource & source, int size, bool allocateOnSetup){
    this->source=&source;
    totalFrames=0;
    maxSize = size;
    if(allocateOnSetup){
        for(int i=0;i<size;i++){
            VideoFrame videoFrame = VideoFrame::newVideoFrame(source.getNextVideoFrame().getPixelsRef());
            videoFrame.getTextureRef();
            newVideoFrame(videoFrame);
        }
    }
    resume();
    microsOneSec=-1;
}
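The two setup variants above differ mainly in that Example 9 also records the source dimensions in static members and defers texture allocation, while Example 10 forces each frame's texture immediately via getTextureRef(). A minimal usage sketch, assuming some concrete VideoSource subclass (LiveSource is a hypothetical name):

LiveSource source;              // hypothetical concrete VideoSource
VideoBuffer buffer;
buffer.setup(source, 60, true); // pre-allocate and fill 60 frames up front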
Example 11: wxDialog
VideoInfoDialog::VideoInfoDialog( wxWindow* parent, RectangleBase* o )
    : wxDialog( parent, wxID_ANY, _("Video Info") ), obj( o )
{
    SetSize( wxSize( 250, 150 ) );
    wxStaticText* labelText = new wxStaticText( this, wxID_ANY, _("") );
    wxStaticText* infoText = new wxStaticText( this, wxID_ANY, _("") );
    std::string labelTextStd, infoTextStd;
    labelTextStd += "Name:\n";
    infoTextStd += obj->getName() + "\n";
    VideoSource* video = dynamic_cast<VideoSource*>( obj );
    if ( video )
    {
        labelTextStd += "RTP name:\n";
        infoTextStd += video->getMetadata( VPMSession::VPMSESSION_SDES_NAME ) + "\n";
        labelTextStd += "RTP cname:\n";
        infoTextStd += video->getMetadata( VPMSession::VPMSESSION_SDES_CNAME ) + "\n";
        labelTextStd += "Location:\n";
        infoTextStd += video->getMetadata( VPMSession::VPMSESSION_SDES_LOC ) + "\n";
        labelTextStd += "Codec:\n";
        infoTextStd += std::string( video->getPayloadDesc() ) + "\n";
        char width[10];
        char height[10];
        sprintf( width, "%u", video->getVideoWidth() );
        sprintf( height, "%u", video->getVideoHeight() );
        labelTextStd += "Resolution:\n";
        infoTextStd += std::string( width ) + " x " + std::string( height ) + "\n";
    }
    labelTextStd += "Grouped?";
    infoTextStd += std::string( obj->isGrouped() ? "Yes" : "No" );
    if ( obj->isGrouped() )
    {
        labelTextStd += "\nGroup:";
        infoTextStd += "\n" + obj->getGroup()->getName();
    }
    labelText->SetLabel( wxString( labelTextStd.c_str(), wxConvUTF8 ) );
    infoText->SetLabel( wxString( infoTextStd.c_str(), wxConvUTF8 ) );
    wxBoxSizer* textSizer = new wxBoxSizer( wxHORIZONTAL );
    textSizer->Add( labelText, wxSizerFlags(0).Align(0).Border( wxALL, 10 ) );
    textSizer->Add( infoText, wxSizerFlags(0).Align(0).Border( wxALL, 10 ) );
    SetSizer( textSizer );
    textSizer->SetSizeHints( this );
}
Example 12: PushAudio
/**************************************************
 Audio data push function
 Parameters:
   lpData: buffer holding the input data
   size:   length of the input data, in bytes
   pts:    timestamp of the input data
***************************************************/
void CDemandMediaAudio::PushAudio(const void *lpData, unsigned int size, int64_t pts, IBaseVideo *Video, bool bCanPlay)
{
    VideoSource *Source = dynamic_cast<VideoSource*>(Video);
    if (!m_uBlockSize || !Source)
        return;
    if (m_sAudioParam.iChannel <= 2)
    {
        if (fVolume != 1.0f)
        {
            short *Tem = (short*)lpData;
            for (int i = 0; i < size; i += 2)
            {
                long sVolume = Tem[i / 2];
                sVolume *= fVolume;
                if (sVolume > 0x7fff)
                {
                    sVolume = 0x7fff;
                }
                else if (sVolume < -0x8000)
                {
                    sVolume = -0x8000;
                }
                Tem[i / 2] = (short)sVolume;
            }
        }
        Source->PlayCallBackAudio((LPBYTE)lpData, size);
    }
    else
    {
        UINT totalSamples = size * 8 / m_sAudioParam.iBitPerSample;
        if (TemconvertBuffer.Num() < totalSamples)
            TemconvertBuffer.SetSize(totalSamples);
        OutputconvertBuffer.SetSize(totalSamples / m_sAudioParam.iChannel * 2);
        if (m_sAudioParam.iBitPerSample == 8)
        {
            float *tempConvert = TemconvertBuffer.Array();
            char *tempSByte = (char*)lpData;
            while (totalSamples--)
            {
                *(tempConvert++) = float(*(tempSByte++)) / 127.0f;
            }
        }
        else if (m_sAudioParam.iBitPerSample == 16)
        {
            float *tempConvert = TemconvertBuffer.Array();
            short *tempShort = (short*)lpData;
            while (totalSamples--)
            {
                *(tempConvert++) = float(*(tempShort++)) / 32767.0f;
            }
        }
        else if (m_sAudioParam.iBitPerSample == 24)
        {
            float *tempConvert = TemconvertBuffer.Array();
            BYTE *tempTriple = (BYTE*)lpData;
            TripleToLong valOut;
            while (totalSamples--)
            {
                // Reinterpret the 3 bytes at the current read position
                TripleToLong &valIn = *(TripleToLong*)tempTriple;
                valOut.wVal = valIn.wVal;
                valOut.tripleVal = valIn.tripleVal;
                if (valOut.tripleVal > 0x7F)
                    valOut.lastByte = 0xFF;
                *(tempConvert++) = float(double(valOut.val) / 8388607.0);
                tempTriple += 3;
            }
        }
        else if (m_sAudioParam.iBitPerSample == 32)
        {
            float *tempConvert = TemconvertBuffer.Array();
            long *tempShort = (long*)lpData;
            while (totalSamples--)
            {
                *(tempConvert++) = float(double(*(tempShort++)) / 2147483647.0);
            }
        }
        float *inputTemp = TemconvertBuffer.Array();
//......... rest of the code omitted .........
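The 24-bit branch relies on a TripleToLong helper that is not shown on this page. A plausible definition, inferred purely from the member accesses above (an assumption, not the project's actual declaration): the union overlays a 2-byte word, the high byte of the 24-bit sample, and a padding byte on a 32-bit integer, so writing 0xFF into lastByte sign-extends negative samples.

// Assumed layout; relies on the anonymous-struct extension (the code
// above is Windows-flavored) and on long being 32 bits wide.
union TripleToLong
{
    struct
    {
        unsigned short wVal;      // low 16 bits of the 24-bit sample
        unsigned char  tripleVal; // high 8 bits, carries the sign bit
        unsigned char  lastByte;  // 0x00, or 0xFF to sign-extend negatives
    };
    long val;                     // the assembled 32-bit sample
};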
Example 13: main
int main( int argc, char* argv[])
{
    // Declare our pointer to the video source
    VideoSource *src;
    CvVideoWriter *writer = 0;
    int isColor = 1;
    int fps = 30; // or 30
    int frameW = 640; // 744 for firewire cameras
    int frameH = 480; // 480 for firewire cameras
    writer=cvCreateVideoWriter("out.avi",CV_FOURCC('P','I','M','1'),
        fps,cvSize(frameW,frameH),isColor);
    if( argc > 1 ) {
        // Initialization: video file
        string path(argv[1]);
        src = new VideoFile( path, (argc > 2) );
    }
    else {
        // Initialization: webcam
        src = new Camera( 0 );
    }
    // Open the video stream
    try {
        src->open();
    }
    catch( Exception &e ) {
        // If an exception occurs, print it and quit.
        cout << e.what() << endl;
        delete src;
        return 10;
    }
    // If all went well, print the video stream information.
    cout << src->getInfos() << endl;
    cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    Image img;
    char key = 'a';
    // Start measuring the frame rate
    debut_mesure = getTimeMillis();
    while( key != 'q' ) {
        try {
            src->getFrame( img );
        }
        catch(Exception &e) {
            cout << "\n" << e.what() << endl;
            break;
        }
        /*CvScalar scalaire;
        scalaire.val[0] = 120;
        scalaire.val[1] = scalaire.val[2] = 0;
        img.colorFilter(scalaire);*/
        img.colorPaint2(top_left,bottom_right);
        if (bottom_right.x < 720) {
            bottom_right.x++;
        }
        if (bottom_right.y < 576) {
            bottom_right.y++;
        }
        if (top_left.x > 0) {
            top_left.x--;
        }
        if (top_left.y > 0) {
            top_left.y--;
        }
        //img.colorBlacknWhite();
        cvShowImage( "video", img );
        cvWriteFrame(writer,img);
        key = cvWaitKey( 10 );
        // Display the frame rate
        cout << "\rFrame Rate : " << setw(5);
        cout << left << setprecision(4);
        cout << calculFrameRate() << " FPS" << flush;
    }
    cout << endl;
    cvReleaseVideoWriter( &writer ); // flush and close out.avi
    cvDestroyWindow( "video" );
    delete src;
    return 0;
}
Example 14: main
int main( int aCount, const char ** aArgs )
{
    // Create default configuration, parse command line parameters
    Config lConfig;
    lConfig.ParseCommandLine( aCount, aArgs );
    // Create video source (pattern generator)
    VideoSource lSource;
    // Get video source properties
    PvUInt32 lWidth = lConfig.GetWidth();
    PvUInt32 lHeight = lConfig.GetHeight();
    PvPixelType lPixelFormat = PvPixelMono8;
    PvUInt32 lSize = lWidth * lHeight;
    // Allocate transmit buffers
    PvBufferList lBuffers;
    PvBufferList lFreeBuffers;
    for ( PvUInt32 i = 0; i < lConfig.GetBufferCount(); i++ )
    {
        // Allocate a new buffer
        PvBuffer *lBuffer = new PvBuffer();
        lBuffer->GetImage()->Alloc( lWidth, lHeight, lPixelFormat );
        // Set to 0
        memset( lBuffer->GetDataPointer(), 0x00, lSize );
        // Add to both the buffer list and the free buffer list
        lBuffers.push_back( lBuffer );
        lFreeBuffers.push_back( lBuffer );
    }
    // Create transmitter, set packet size
    PvTransmitterRaw lTransmitter;
    lTransmitter.SetPacketSize( lConfig.GetPacketSize() );
    // Create virtual device (used for discovery)
    PvVirtualDevice lDevice;
    lDevice.StartListening( lConfig.GetSourceAddress() );
    cout << "Listening for device discovery requests on " << lConfig.GetSourceAddress() << endl;
    // Open transmitter - sets destination and source
    PvResult lResult = lTransmitter.Open(
        lConfig.GetDestinationAddress(), lConfig.GetDestinationPort(),
        lConfig.GetSourceAddress(), lConfig.GetSourcePort() );
    if ( !lResult.IsOK() )
    {
        cout << "Failed to open a connection to the transmitter." << endl;
        return 1;
    }
    cout << "Transmission stream opened:" << endl;
    cout << "Source: " << lTransmitter.GetSourceIPAddress().GetAscii() << " port " << lTransmitter.GetSourcePort() << endl;
    cout << "Destination: " << lConfig.GetDestinationAddress() << " port " << lConfig.GetDestinationPort() << endl;
    if ( !lConfig.GetSilent() )
    {
        cout << "Press any key to begin transmitting.\r";
        PvWaitForKeyPress();
    }
    cout << "Press any key to stop transmitting." << endl;
    // Set maximum throughput (just to even out traffic, as we control throughput at the source)
    if ( lConfig.GetFPS() != 0 )
    {
        // Multiply image size (in bits) by FPS
        float lMax = static_cast<float>( lSize ) * 8;
        lMax *= lConfig.GetFPS();
        // Since we control throughput at the source, make sure maximum throughput is slightly
        // higher than what we need. We want to even out packet traffic, not slow down the source frame rate
        lMax *= 1.1f;
        // Set max throughput
        lTransmitter.SetMaxPayloadThroughput( lMax );
    }
    char lDoodle[] = "|\\-|-/";
    int lDoodleIndex = 0;
    // Reset transmitter stats
    lTransmitter.ResetStats();
    // Used to transmit at a steady frame rate
    PvFPSStabilizer lStabilizer;
    // Acquisition/transmission loop
    while( !PvKbHit() )
    {
        // Step 1: If timing is right to meet desired FPS, generate pattern, transmit
        if ( ( lConfig.GetFPS() == 0 ) || lStabilizer.IsTimeToDisplay( (PvUInt32)lConfig.GetFPS() ) )
        {
            // Are there buffers available for transmission?
            if ( lFreeBuffers.size() > 0 )
            {
                // Retrieve buffer from list
                PvBuffer *lBuffer = lFreeBuffers.front();
                lFreeBuffers.pop_front();
//......... rest of the code omitted .........
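To make the max-throughput computation above concrete: for a hypothetical 640 x 480 Mono8 pattern at 30 FPS, lMax = 640 x 480 x 8 bits x 30 x 1.1 ≈ 81.1 Mbit/s, slightly above the ≈ 73.7 Mbit/s the source actually generates. The 10% headroom smooths packet pacing without ever throttling the source frame rate.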
Example 15: VPMVideoBufferSink
void VideoListener::vpmsession_source_created( VPMSession &session,
                                               uint32_t ssrc, uint32_t pt, VPMPayload type,
                                               VPMPayloadDecoder* decoder )
{
    VPMVideoDecoder *d = dynamic_cast<VPMVideoDecoder*>( decoder );
    if ( d )
    {
        sourceCount++;
        VPMVideoFormat format = d->getOutputFormat();
        VPMVideoBufferSink *sink;
        // if we have shaders available, set the output format to YUV420P so
        // the videosource class will apply the YUV420P -> RGB conversion shader
        gravUtil::logVerbose( "VideoListener::vpmsession_source_created: "
            "creating source, have shaders? %i format? %i (yuv420p: %i)\n",
            GLUtil::getInstance()->areShadersAvailable(), format,
            VIDEO_FORMAT_YUV420 );
        if ( GLUtil::getInstance()->areShadersAvailable() &&
             format == VIDEO_FORMAT_YUV420 )
            sink = new VPMVideoBufferSink( format );
        else
            sink = new VPMVideoBufferSink( VIDEO_FORMAT_RGB24 );
        // note that the buffer sink will be deleted when the decoder for the
        // source is (inside VPMedia), so that's why it isn't deleted here or in
        // videosource
        if ( !sink->initialise() )
        {
            gravUtil::logError( "VideoListener::vpmsession_source_created: "
                "Failed to initialise video sink\n" );
            return;
        }
        d->connectVideoProcessor( sink );
        // this is a bit clunky - VideoSource needs to have a reference to the
        // general SessionEntry but we only know the VPMSession pointer (not
        // even the address since that's only in VPMSession_net)
        // this should be thread-safe since this function will be called on the
        // second thread
        // TODO will change if sessions are on their own threads?
        SessionEntry* se = sessionMan->findSessionByVPMSession( &session );
        // if we're getting a new video from a VPMSession but it's not found in
        // the SessionManager something is seriously fubar
        if ( se == NULL )
        {
            gravUtil::logError( "VideoListener::vpmsession_source_created: "
                "session not found in SessionManager. Something is "
                "horribly wrong :(\n" );
            return;
        }
        VideoSource* source = new VideoSource( se, this, ssrc, sink, 0.0f, 0.0f );
        source->setScale( 5.25f, 5.25f );
        source->move( x, y );
        objectMan->addNewSource( source );
        // new frame callback mostly just used for testing
        //sink->addNewFrameCallback( &newFrameCallbackTest, (void*)timer );
        // do some basic grid positions
        // TODO make this better, use layoutmanager somehow?
        // probably should be moved to objectManager regardless
        x += 8.8f;
        if ( x > 15.0f )
        {
            x = -7.5f;
            y -= 5.9f;
        }
        // reset to top
        if ( y < -11.0f )
        {
            x = initialX + ( 0.5f * ( sourceCount / 9 ) );
            y = initialY - ( 0.5f * ( sourceCount / 9 ) );
        }
    }
}