本文整理汇总了C++中MediaInfo类的典型用法代码示例。如果您正苦于以下问题:C++ MediaInfo类的具体用法?C++ MediaInfo怎么用?C++ MediaInfo使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了MediaInfo类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: GetVideoRotation
int CMediaInfo::GetVideoRotation(const wxString &filename)
{
MediaInfo MI;
wstring To_Display;
MI.Open(CConvertUtility::ConvertToStdWstring(filename));
To_Display = MI.Get(Stream_Video, 0, __T("Rotation"), Info_Text, Info_Name).c_str();
MI.Close();
if (To_Display != "")
return std::stoi(To_Display);
return 0;
}
示例2: DebugPrintFunc
// Loads the full technical report for `fileName` under the info mutex and
// publishes it through the FullInfoLoaded signal.
void FilmInfoWindow::LoadTechnicalInfo( const QString& fileName )
{
    DebugPrintFunc( "FilmInfoWindow::LoadTechnicalInfo", fileName );

    QMutexLocker locker( &loadInfoMutex );
    // RAII FIX: a stack-allocated MediaInfo replaces the manual new/delete
    // pair, so the object is released even if GetCompleteData() throws.
    MediaInfo mi( fileName );
    emit FullInfoLoaded( mi.GetCompleteData() );

    DebugPrintFuncDone( "FilmInfoWindow::LoadTechnicalInfo" );
}
示例3: __T
// Reads all metadata MediaInfo can report for `filename` and returns it as
// one CMetadata entry per report line.
vector<CMetadata> CMediaInfo::ReadMetadata(const wxString &filename)
{
    vector<CMetadata> metadata;
    MediaInfo MI;
    // Version/feature negotiation with the MediaInfo DLL; the returned
    // banner string is intentionally unused (the original stored it in a
    // dead local).
    MI.Option(__T("Info_Version"), __T("0.7.13;MediaInfoDLL_Example_MSVC;0.7.13"));
    MI.Open(CConvertUtility::ConvertToStdWstring(filename));
    // BUG FIX: the original wrote "std:wstring" (single colon), which the
    // compiler parses as a label named "std" followed by an unqualified
    // "wstring" — spell the type correctly.
    std::wstring value = MI.Inform();
    metadata = SplitByLine(value);
    MI.Close();
    return metadata;
}
示例4: ReportFailureOnMainThread
// Invoked once the reader has parsed the resource's metadata. Fails the
// decode job when there is no audio track; otherwise records the codec in
// use via telemetry (asynchronously) and starts requesting samples.
void MediaDecodeTask::OnMetadataRead(MetadataHolder&& aMetadata) {
  mMediaInfo = *aMetadata.mInfo;
  if (!mMediaInfo.HasAudio()) {
    // No audio: tear down the reader and report NoAudio to the main thread.
    mDecoderReader->Shutdown();
    ReportFailureOnMainThread(WebAudioDecodeJob::NoAudio);
    return;
  }
  nsCString codec;
  if (!mMediaInfo.mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
    // Prefer the demuxed audio MIME type when the demuxer provided one.
    codec = nsPrintfCString(
        "webaudio; %s", mMediaInfo.mAudio.GetAsAudioInfo()->mMimeType.get());
  } else {
    // Fall back to the container type this task was created with.
    codec = nsPrintfCString("webaudio;resource; %s",
                            mContainerType.Type().AsString().Data());
  }
  // Accumulate telemetry off the decode path; the lambda owns its own copy
  // of `codec`, so it stays valid after this frame returns.
  nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction(
      "MediaDecodeTask::OnMetadataRead", [codec]() -> void {
        MOZ_ASSERT(!codec.IsEmpty());
        MOZ_LOG(gMediaDecoderLog, LogLevel::Debug,
                ("Telemetry (WebAudio) MEDIA_CODEC_USED= '%s'", codec.get()));
        Telemetry::Accumulate(Telemetry::HistogramID::MEDIA_CODEC_USED, codec);
      });
  SystemGroup::Dispatch(TaskCategory::Other, task.forget());
  RequestSample();
}
示例5: mon
// Called when a TrackBuffer has finished initializing and knows which
// streams it contains. The first configured buffer offering audio (resp.
// video) becomes this reader's audio (resp. video) source; later buffers
// of the same kind are ignored.
void
MediaSourceReader::OnTrackBufferConfigured(TrackBuffer* aTrackBuffer, const MediaInfo& aInfo)
{
  // Track-buffer bookkeeping is guarded by the decoder's reentrant monitor.
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  MOZ_ASSERT(aTrackBuffer->IsReady());
  MOZ_ASSERT(mTrackBuffers.Contains(aTrackBuffer));
  if (aInfo.HasAudio() && !mAudioTrack) {
    MSE_DEBUG("MediaSourceReader(%p)::OnTrackBufferConfigured %p audio", this, aTrackBuffer);
    mAudioTrack = aTrackBuffer;
  }
  if (aInfo.HasVideo() && !mVideoTrack) {
    MSE_DEBUG("MediaSourceReader(%p)::OnTrackBufferConfigured %p video", this, aTrackBuffer);
    mVideoTrack = aTrackBuffer;
  }
  // A newly configured buffer may unblock a decoder waiting for resources.
  mDecoder->NotifyWaitingForResourcesStatusChanged();
}
示例6: OnInitDialog
// CDLGMediaInfo message handlers

// Fills the dialog's bound edit text (m_edit) with the complete MediaInfo
// report for `path` and pushes it to the controls.
BOOL CDLGMediaInfo::OnInitDialog()
{
  CDialog::OnInitDialog();
  MediaInfo MI;
  MI.Open(path);
  // "Complete=1" makes Inform() return the full verbose report.
  // NOTE(review): narrow string literals are passed to Option(); in a
  // Unicode build MediaInfoDLL's Option() takes wide strings — confirm
  // this build uses the ANSI interface (or wrap with _T()).
  MI.Option("Complete", "1");
  m_edit = MI.Inform().c_str();
  MI.Close();
  // FALSE = copy member variables into the dialog controls.
  UpdateData(FALSE);
  return TRUE;  // return TRUE unless you set the focus to a control
}
示例7: ReportFailureOnMainThread
// Stores the metadata just read and either proceeds to sample decoding or,
// when the resource carries no audio track, aborts the decode job.
void
MediaDecodeTask::OnMetadataRead(MetadataHolder* aMetadata)
{
  mMediaInfo = aMetadata->mInfo;
  if (mMediaInfo.HasAudio()) {
    // Audio is present: move on to requesting the first samples.
    RequestSample();
    return;
  }
  // No audio track: shut the reader down and report the failure.
  mDecoderReader->Shutdown();
  ReportFailureOnMainThread(WebAudioDecodeJob::NoAudio);
}
示例8: MOZ_ASSERT
// Scans every attached SourceBuffer and latches the first audio track, the
// first video track and the first crypto information found into mInfo.
// Returns true when enough metadata is available to proceed; false when
// there are no buffers, or when some buffer has produced no metadata yet
// and we are still missing a track type.
bool
MediaSourceDemuxer::ScanSourceBuffersForContent()
{
  MOZ_ASSERT(OnTaskQueue());
  if (mSourceBuffers.IsEmpty()) {
    return false;
  }
  // mInfo and the track assignments are shared state; hold the monitor.
  MonitorAutoLock mon(mMonitor);
  bool haveEmptySourceBuffer = false;
  for (const auto& sourceBuffer : mSourceBuffers) {
    MediaInfo info = sourceBuffer->GetMetadata();
    if (!info.HasAudio() && !info.HasVideo()) {
      // This buffer hasn't reported any usable metadata yet.
      haveEmptySourceBuffer = true;
    }
    // First buffer advertising a track type wins that role.
    if (info.HasAudio() && !mAudioTrack) {
      mInfo.mAudio = info.mAudio;
      mAudioTrack = sourceBuffer;
    }
    if (info.HasVideo() && !mVideoTrack) {
      mInfo.mVideo = info.mVideo;
      mVideoTrack = sourceBuffer;
    }
    if (info.IsEncrypted() && !mInfo.IsEncrypted()) {
      mInfo.mCrypto = info.mCrypto;
    }
  }
  if (mInfo.HasAudio() && mInfo.HasVideo()) {
    // We have both audio and video. We can ignore non-ready source buffer.
    return true;
  }
  // Missing a track: only report content if every buffer has metadata.
  return !haveEmptySourceBuffer;
}
示例9: mDecoderID
// Constructs a seek task for the given decoder/reader pair, clamping the
// requested seek target into [0, aDuration] and wiring up the reader
// callbacks. aCurrentMediaTime is the playback position before the seek.
SeekTask::SeekTask(const void* aDecoderID,
                   AbstractThread* aThread,
                   MediaDecoderReaderWrapper* aReader,
                   SeekJob&& aSeekJob,
                   const MediaInfo& aInfo,
                   const media::TimeUnit& aDuration,
                   int64_t aCurrentMediaTime)
  : mDecoderID(aDecoderID)
  , mOwnerThread(aThread)
  , mReader(aReader)
  , mSeekJob(Move(aSeekJob))
  , mCurrentTimeBeforeSeek(aCurrentMediaTime)
  , mAudioRate(aInfo.mAudio.mRate)
  , mHasAudio(aInfo.HasAudio())
  , mHasVideo(aInfo.HasVideo())
  , mDropAudioUntilNextDiscontinuity(false)
  , mDropVideoUntilNextDiscontinuity(false)
  , mIsDiscarded(false)
  , mIsAudioQueueFinished(false)
  , mIsVideoQueueFinished(false)
  , mNeedToStopPrerollingAudio(false)
  , mNeedToStopPrerollingVideo(false)
{
  // Bound the seek time to be inside the media range.
  int64_t end = aDuration.ToMicroseconds();
  NS_ASSERTION(end != -1, "Should know end time by now");
  int64_t seekTime = mSeekJob.mTarget.GetTime().ToMicroseconds();
  seekTime = std::min(seekTime, end);
  seekTime = std::max(int64_t(0), seekTime);
  NS_ASSERTION(seekTime >= 0 && seekTime <= end,
               "Can only seek in range [0,duration]");
  mSeekJob.mTarget.SetTime(media::TimeUnit::FromMicroseconds(seekTime));
  // Samples before the first discontinuity belong to the pre-seek position;
  // drop them for each track the media actually has.
  mDropAudioUntilNextDiscontinuity = HasAudio();
  mDropVideoUntilNextDiscontinuity = HasVideo();
  // Configure MediaDecoderReaderWrapper.
  SetMediaDecoderReaderWrapperCallback();
}
示例10: SeekTask
// Constructs an accurate (frame-exact) seek task. Seeking on a track is
// considered already done when the media lacks that track, or — for audio —
// when the target is marked video-only. The target time is clamped into
// [0, aEnd] before use.
AccurateSeekTask::AccurateSeekTask(const void* aDecoderID,
                                   AbstractThread* aThread,
                                   MediaDecoderReaderWrapper* aReader,
                                   const SeekTarget& aTarget,
                                   const MediaInfo& aInfo,
                                   const media::TimeUnit& aEnd,
                                   int64_t aCurrentMediaTime)
  : SeekTask(aDecoderID, aThread, aReader, aTarget)
  , mCurrentTimeBeforeSeek(media::TimeUnit::FromMicroseconds(aCurrentMediaTime))
  , mAudioRate(aInfo.mAudio.mRate)
  , mDoneAudioSeeking(!aInfo.HasAudio() || aTarget.IsVideoOnly())
  , mDoneVideoSeeking(!aInfo.HasVideo())
{
  AssertOwnerThread();
  // Bound the seek time to be inside the media range.
  NS_ASSERTION(aEnd.ToMicroseconds() != -1, "Should know end time by now");
  mTarget.SetTime(std::max(media::TimeUnit(), std::min(mTarget.GetTime(), aEnd)));
  // Configure MediaDecoderReaderWrapper.
  SetCallbacks();
}
示例11: OnInitDialog
// Loads the complete MediaInfo report for the current file (m_fn) into the
// page's edit control, picks the first installed monospace font for display,
// and subclasses the control's window procedure.
BOOL CPPageFileMediaInfo::OnInitDialog()
{
    __super::OnInitDialog();

    if (!m_pCFont) {
        m_pCFont = DNew CFont;
    }
    if (!m_pCFont) {
        return TRUE;
    }

    MediaInfo MI;
    MI.Option(_T("ParseSpeed"), _T("0"));
    MI.Option(_T("Language"), mi_get_lang_file());
    MI.Option(_T("Complete"));
    MI.Open(m_fn.GetString());
    MI_Text = MI.Inform().c_str();
    MI.Close();
    // MediaInfo reports load failures through Inform(); show nothing rather
    // than the raw error string.
    if (!MI_Text.Find(_T("Unable to load"))) {
        MI_Text.Empty();
    }

    LOGFONT lf;
    memset(&lf, 0, sizeof(lf));
    lf.lfPitchAndFamily = DEFAULT_PITCH | FF_MODERN;
    // Monospace candidates in order of preference; the empty entry is a
    // terminator so the loop always runs at least once.
    LPCTSTR fonts[] = {_T("Consolas"), _T("Lucida Console"), _T("Courier New"), _T("") };
    UINT i = 0;
    BOOL success;
    // BUG FIX: BeginPaint()/EndPaint() are only valid while processing
    // WM_PAINT; calling them here returns an empty clipped DC. Use
    // GetDC()/ReleaseDC() to query LOGPIXELSY for the font height instead.
    CDC* cDC = m_mediainfo.GetDC();
    do {
        wcscpy_s(lf.lfFaceName, LF_FACESIZE, fonts[i]);
        lf.lfHeight = -MulDiv(8, cDC->GetDeviceCaps(LOGPIXELSY), 72);
        success = IsFontInstalled(fonts[i]) && m_pCFont->CreateFontIndirect(&lf);
        i++;
    } while (!success && i < _countof(fonts));
    m_mediainfo.SetFont(m_pCFont);
    m_mediainfo.SetWindowText(MI_Text);
    m_mediainfo.ReleaseDC(cDC);

    // Subclass the control so ControlProc sees its messages first.
    OldControlProc = (WNDPROC)SetWindowLongPtr(m_mediainfo.m_hWnd, GWLP_WNDPROC, (LONG_PTR)ControlProc);

    return TRUE;  // return TRUE unless you set the focus to a control
}
示例12: AssertOwnerThread
// Starts audio playback at aStartTime. When the media has no audio track,
// the audio clock is treated as already ended so the fallback (system)
// clock drives playback instead.
void
AudioSinkWrapper::Start(int64_t aStartTime, const MediaInfo& aInfo)
{
  AssertOwnerThread();
  MOZ_ASSERT(!mIsStarted, "playback already started.");
  mIsStarted = true;
  mPlayDuration = aStartTime;
  mPlayStartTime = TimeStamp::Now();
  // no audio is equivalent to audio ended before video starts.
  mAudioEnded = !aInfo.HasAudio();
  if (aInfo.HasAudio()) {
    mAudioSink = mCreator->Create();
    mEndPromise = mAudioSink->Init(mParams);
    // Route both resolve and reject of the sink's end promise to
    // OnAudioEnded so the ended state is updated either way.
    mAudioSinkPromise.Begin(mEndPromise->Then(
      mOwnerThread.get(), __func__, this,
      &AudioSinkWrapper::OnAudioEnded,
      &AudioSinkWrapper::OnAudioEnded));
  }
}
示例13: url
// Serializes one playlist entry as an XSPF <track> element, including the
// QuarkPlayer-specific <extension> block (cue indexes, year, genre).
// Element order follows the XSPF specification and must not change.
void XSPFParser::writeTrack(QXmlStreamWriter & xml, const MediaInfo & mediaInfo) {
	xml.writeStartElement(XSPF_TRACK);

	//Filename, percent-encoded as a URL
	QUrl url(mediaInfo.fileName());
	writeTextElement(xml, XSPF_LOCATION, url.toEncoded());

	//Unique ID
	//FIXME not implemented yet
	//writeTextElement(xml, XSPF_IDENTIFIER, QString());

	//Artist
	writeTextElement(xml, XSPF_CREATOR, mediaInfo.metadataValue(MediaInfo::Artist));

	//Album
	writeTextElement(xml, XSPF_ALBUM, mediaInfo.metadataValue(MediaInfo::Album));

	//Track number
	writeTextElement(xml, XSPF_TRACKNUM, mediaInfo.metadataValue(MediaInfo::TrackNumber));

	//Title
	writeTextElement(xml, XSPF_TITLE, mediaInfo.metadataValue(MediaInfo::Title));

	//Comment
	writeTextElement(xml, XSPF_ANNOTATION, mediaInfo.metadataValue(MediaInfo::Comment));

	//Length in milliseconds
	writeIntElement(xml, XSPF_DURATION, mediaInfo.lengthMilliseconds());

	//Album art URL
	//FIXME not implemented yet
	//writeTextElement(xml, XSPF_IMAGE, mediaInfo.metadataValue(MediaInfo::AlbumArt));

	//URL of the original web page
	writeTextElement(xml, XSPF_INFO, mediaInfo.metadataValue(MediaInfo::URL));

	//Application-specific data lives in a namespaced <extension> element.
	xml.writeStartElement(XSPF_EXTENSION);
	xml.writeAttribute(XSPF_APPLICATION, XSPF_QUARKPLAYER_NAMESPACE);
	writeTextElementWithNamespace(xml, XSPF_QUARKPLAYER_NAMESPACE, XSPF_QUARKPLAYER_CUE_START_INDEX, mediaInfo.cueStartIndexFormatted());
	writeTextElementWithNamespace(xml, XSPF_QUARKPLAYER_NAMESPACE, XSPF_QUARKPLAYER_CUE_END_INDEX, mediaInfo.cueEndIndexFormatted());
	writeTextElementWithNamespace(xml, XSPF_QUARKPLAYER_NAMESPACE, XSPF_QUARKPLAYER_YEAR, mediaInfo.metadataValue(MediaInfo::Year));
	writeTextElementWithNamespace(xml, XSPF_QUARKPLAYER_NAMESPACE, XSPF_QUARKPLAYER_GENRE, mediaInfo.metadataValue(MediaInfo::Genre));
	xml.writeEndElement();	//extension

	xml.writeEndElement();	//track
}
示例14: AssertOwnerThread
// Starts playback: always starts the wrapped audio sink, and additionally
// wires up the video render loop when the media has a video track.
void
VideoSink::Start(int64_t aStartTime, const MediaInfo& aInfo)
{
  AssertOwnerThread();
  VSINK_LOG("[%s]", __func__);

  // Audio playback is delegated to the composed sink unconditionally.
  mAudioSink->Start(aStartTime, aInfo);

  mHasVideo = aInfo.HasVideo();
  if (!mHasVideo) {
    return;
  }
  mEndPromise = mEndPromiseHolder.Ensure(__func__);
  ConnectListener();
  TryUpdateRenderedVideoFrames();
}
示例15: Start
// Starts playback. The wrapped audio sink is always started; when the media
// has video, the render loop is wired up and — if the audio sink exposes an
// end promise for the video track — our own end promise is chained behind
// it. Returns the audio sink's start result.
nsresult VideoSink::Start(const TimeUnit& aStartTime, const MediaInfo& aInfo) {
  AssertOwnerThread();
  VSINK_LOG("[%s]", __func__);
  nsresult rv = mAudioSink->Start(aStartTime, aInfo);
  mHasVideo = aInfo.HasVideo();
  if (mHasVideo) {
    mEndPromise = mEndPromiseHolder.Ensure(__func__);
    // If the underlying MediaSink has an end promise for the video track (which
    // happens when mAudioSink refers to a DecodedStream), we must wait for it
    // to complete before resolving our own end promise. Otherwise, MDSM might
    // stop playback before DecodedStream plays to the end and cause
    // test_streams_element_capture.html to time out.
    RefPtr<EndedPromise> p = mAudioSink->OnEnded(TrackInfo::kVideoTrack);
    if (p) {
      RefPtr<VideoSink> self = this;
      // Both resolve and reject paths complete the request and give the
      // render loop a chance to resolve mEndPromiseHolder.
      p->Then(mOwnerThread, __func__,
              [self]() {
                self->mVideoSinkEndRequest.Complete();
                self->TryUpdateRenderedVideoFrames();
                // It is possible the video queue size is 0 and we have no
                // frames to render. However, we need to call
                // MaybeResolveEndPromise() to ensure mEndPromiseHolder is
                // resolved.
                self->MaybeResolveEndPromise();
              },
              [self]() {
                self->mVideoSinkEndRequest.Complete();
                self->TryUpdateRenderedVideoFrames();
                self->MaybeResolveEndPromise();
              })
          ->Track(mVideoSinkEndRequest);
    }
    ConnectListener();
    // Run the render loop at least once so we can resolve the end promise
    // when video duration is 0.
    UpdateRenderedVideoFrames();
  }
  return rv;
}