本文整理汇总了C++中VideoInfo::HasAudio方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoInfo::HasAudio方法的具体用法?C++ VideoInfo::HasAudio怎么用?C++ VideoInfo::HasAudio使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoInfo
的用法示例。
在下文中一共展示了VideoInfo::HasAudio方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: LoadFromClip
/// @brief Load audio from an Avisynth clip into this provider.
///
/// Pipeline: verify the clip has audio, downmix to one channel using the
/// user-configured downmixer filter, convert to 16-bit samples, optionally
/// resample, then cache the resulting clip and its audio properties.
///
/// @param _clip Avisynth clip to read audio from.
/// @throws agi::AudioDataNotFoundError if the clip carries no audio track.
void AvisynthAudioProvider::LoadFromClip(AVSValue _clip) {
	AVSValue script;

	// Bail out early if there is no audio at all
	VideoInfo vi = _clip.AsClip()->GetVideoInfo();
	if (!vi.HasAudio()) throw agi::AudioDataNotFoundError("No audio found.", 0);

	IScriptEnvironment *env = avs_wrapper.GetEnv();

	// Downmix to one channel with the user-configured downmixer filter.
	// The converted filter name lives in a named wxCharBuffer so it outlives
	// the Invoke call; this replaces the previous fixed-size char[1024] +
	// strcpy, which could overflow on a long option value.
	wxCharBuffer downmixer = lagi_wxString(OPT_GET("Audio/Downmixer")->GetString()).mb_str(csConvLocal);
	script = env->Invoke(downmixer.data(), _clip);

	// Convert to 16 bits per sample
	script = env->Invoke("ConvertAudioTo16bit", script);
	vi = script.AsClip()->GetVideoInfo();

	// Convert sample rate. 0 means "leave unchanged", but rates below
	// 32 kHz are forced up to 44.1 kHz.
	int setsample = OPT_GET("Provider/Audio/AVS/Sample Rate")->GetInt();
	if (vi.SamplesPerSecond() < 32000) setsample = 44100;
	if (setsample != 0) {
		AVSValue args[2] = { script, setsample };
		script = env->Invoke("ResampleAudio", AVSValue(args, 2));
	}

	// Cache the final clip and read its audio properties
	PClip tempclip = script.AsClip();
	vi = tempclip->GetVideoInfo();
	channels = vi.AudioChannels();
	num_samples = vi.num_audio_samples;
	sample_rate = vi.SamplesPerSecond();
	bytes_per_sample = vi.BytesPerAudioSample();
	float_samples = false;
	clip = tempclip;
}
示例2: dimzon_avs_init
//.........这里部分代码省略.........
// NOTE(review): fragment of dimzon_avs_init — the beginning and the end of
// the function are omitted in this excerpt, so only the pixel-format and
// sample-type negotiation logic is visible here.
//
// Pixel-format negotiation: if the caller requested a specific pixel type
// and the source does not already match it, pick the AviSynth conversion
// filter to apply (and the error message to report if conversion fails).
if (pixel_type_req == 0) { /*Output video as-is, no checks or conversions*/ }
else if (pixel_type_req == inf.CS_BGR32) { if (!inf.IsRGB32()) { filter = "ConvertToRGB32"; err_string = "AviSynthWrapper: Cannot convert video to RGB32!"; }}
else if (pixel_type_req == inf.CS_BGR24) { if (!inf.IsRGB24()) { filter = "ConvertToRGB24"; err_string = "AviSynthWrapper: Cannot convert video to RGB24!"; }}
else if (pixel_type_req == inf.CS_YUY2) { if (!inf.IsYUY2()) { filter = "ConvertToYUY2"; err_string = "AviSynthWrapper: Cannot convert video to YUY2!"; }}
else if (pixel_type_req == inf.CS_YV12) { if (!inf.IsYV12()) { filter = "ConvertToYV12"; err_string = "AviSynthWrapper: Cannot convert video to YV12!"; }}
else if (pixel_type_req == inf.CS_I420) { if (!inf.IsYV12()) { filter = "ConvertToYV12"; err_string = "AviSynthWrapper: Cannot convert video to YV12!"; }}
else
{
//"2.5 Baked API will see all new planar as YV12"
//YV411, YV24, YV16 and Y8 are all reported as YV12 by IsYV12()
strncpy_s(pstr->err, ERRMSG_LEN, "AviSynthWrapper: Requested PixelType isn't valid or such conversion is not yet implemented!", _TRUNCATE);
return 5;
}
// Apply the chosen video conversion filter, then re-check the clip's
// format; if the conversion did not produce the requested type, fail.
if (filter.length() > 0)
{
res = pstr->env->Invoke(filter.c_str(), AVSValue(&res, 1));
pstr->clp = res.AsClip();
VideoInfo infh = pstr->clp->GetVideoInfo();
if (pixel_type_req == inf.CS_BGR32 && !infh.IsRGB32() ||
pixel_type_req == inf.CS_BGR24 && !infh.IsRGB24() ||
pixel_type_req == inf.CS_YUY2 && !infh.IsYUY2() ||
pixel_type_req == inf.CS_YV12 && !infh.IsYV12() ||
pixel_type_req == inf.CS_I420 && !infh.IsYV12())
{
strncpy_s(pstr->err, ERRMSG_LEN, err_string.c_str(), _TRUNCATE);
return 5;
}
}
}
// Sample-type negotiation: mirrors the video path above, but for the
// audio sample format (FLOAT / INT32 / INT24 / INT16 / INT8).
if (inf.HasAudio())
{
string filter = "";
string err_string = "";
//Original and requested SampleType
if (vi != NULL) vi->sample_type_orig = inf.sample_type;
int sample_type_req = (vi != NULL) ? vi->sample_type : 0;
if (sample_type_req == 0) { /*Output audio as-is, no checks or conversions*/ }
else if (sample_type_req == SAMPLE_FLOAT) { if (inf.sample_type != SAMPLE_FLOAT) { filter = "ConvertAudioToFloat"; err_string = "AviSynthWrapper: Cannot convert audio to FLOAT!"; }}
else if (sample_type_req == SAMPLE_INT32) { if (inf.sample_type != SAMPLE_INT32) { filter = "ConvertAudioTo32bit"; err_string = "AviSynthWrapper: Cannot convert audio to INT32!"; }}
else if (sample_type_req == SAMPLE_INT24) { if (inf.sample_type != SAMPLE_INT24) { filter = "ConvertAudioTo24bit"; err_string = "AviSynthWrapper: Cannot convert audio to INT24!"; }}
else if (sample_type_req == SAMPLE_INT16) { if (inf.sample_type != SAMPLE_INT16) { filter = "ConvertAudioTo16bit"; err_string = "AviSynthWrapper: Cannot convert audio to INT16!"; }}
else if (sample_type_req == SAMPLE_INT8) { if (inf.sample_type != SAMPLE_INT8) { filter = "ConvertAudioTo8bit"; err_string = "AviSynthWrapper: Cannot convert audio to INT8!"; }}
else
{
strncpy_s(pstr->err, ERRMSG_LEN, "AviSynthWrapper: Requested SampleType isn't valid or such conversion is not yet implemented!", _TRUNCATE);
return 6;
}
// Apply the chosen audio conversion filter and verify the result
// (the excerpt is cut off inside this verification condition).
if (filter.length() > 0)
{
res = pstr->env->Invoke(filter.c_str(), res);
pstr->clp = res.AsClip();
VideoInfo infh = pstr->clp->GetVideoInfo();
if (sample_type_req == SAMPLE_FLOAT && infh.sample_type != SAMPLE_FLOAT ||
sample_type_req == SAMPLE_INT32 && infh.sample_type != SAMPLE_INT32 ||
sample_type_req == SAMPLE_INT24 && infh.sample_type != SAMPLE_INT24 ||
sample_type_req == SAMPLE_INT16 && infh.sample_type != SAMPLE_INT16 ||
sample_type_req == SAMPLE_INT8 && infh.sample_type != SAMPLE_INT8)
示例3: main
// NOTE(review): fragment of the filmtester main() — this excerpt is cut off
// inside the playback loop, so the tail of the function is not visible here.
//
// Loads an AVS script, prints its video/audio properties, then plays the
// clip frame by frame looking for runs of duplicate frames.
int main(int argc, TCHAR* argv[]) {
// Keep the system awake for the duration of the (potentially long) test run
SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED);
printf("Usage: filmtester <avs filename> [duplicates_maxlength=2]\n");
printf("The program plays the AVS file and tests for frame duplicates\n\n");
int duplicates_maxlength = 2;
if (argc < 2) {
printf("No filename specified.\n\n");
return -1;
}
// Optional second argument overrides the allowed duplicate-run length
if (argc > 2) {
duplicates_maxlength = _ttoi(argv[2]);
printf("INFO: duplicates_maxlength set to %d\n", duplicates_maxlength);
}
IScriptEnvironment *env = CreateScriptEnvironment();
_tprintf(_T("Loading \"%s\" ...\n"), argv[1]);
// Load the script via the AviSynth "import" function
LPCSTR arg_names[1] = { nullptr };
AVSValue arg_vals[1] = { (LPCSTR)argv[1] };
clip = env->Invoke("import", AVSValue(arg_vals,1), arg_names).AsClip();
printf("AVS file loaded successfully.\n\n");
VideoInfo vi = clip->GetVideoInfo();
printf("VideoInfo:\n");
printf("-----------\n");
// Dump the video properties: geometry, frame rate, and decoded
// colorspace/format/field flags (pixel_type and image_type are bitmasks)
if (vi.HasVideo()) {
printf("width x height: %dx%d\n", vi.width, vi.height);
printf("num_frames: %d\n", vi.num_frames);
printf("fps: %d/%d\n", vi.fps_numerator, vi.fps_denominator);
std::string colorspace;
if (vi.pixel_type & VideoInfo::CS_BGR) colorspace += "BGR, ";
if (vi.pixel_type & VideoInfo::CS_YUV) colorspace += "YUV, ";
if (vi.pixel_type & VideoInfo::CS_INTERLEAVED) colorspace += "INTERLEAVED, ";
if (vi.pixel_type & VideoInfo::CS_PLANAR) colorspace += "PLANAR, ";
// Trim the trailing ", " left by the flag appends above
if (colorspace.length() > 0) colorspace.erase(colorspace.length()-2);
printf("colorspace: %s\n", colorspace.c_str());
std::string colorformat;
if (vi.pixel_type & VideoInfo::CS_BGR24) colorformat += "BGR24, ";
if (vi.pixel_type & VideoInfo::CS_BGR32) colorformat += "BGR32, ";
if (vi.pixel_type & VideoInfo::CS_YUY2) colorformat += "YUY2, ";
if (vi.pixel_type & VideoInfo::CS_YV12) colorformat += "YV12, ";
if (vi.pixel_type & VideoInfo::CS_I420) colorformat += "I420 (IYUV), ";
if (colorformat.length() > 0)
colorformat.erase(colorformat.length()-2);
else
colorformat = "UNKNOWN";
printf("colorformat: %s\n", colorformat.c_str());
std::string imagetype;
if (vi.image_type & VideoInfo::IT_BFF) imagetype += "BFF, ";
if (vi.image_type & VideoInfo::IT_TFF) imagetype += "TFF, ";
if (vi.image_type & VideoInfo::IT_FIELDBASED) imagetype += "FIELDBASED, ";
if (imagetype.length() > 0)
imagetype.erase(imagetype.length()-2);
else
imagetype = "UNKNOWN";
printf("image_type: %s\n", imagetype.c_str());
printf("bits per pixel: %d\n", vi.BitsPerPixel());
}
else
printf("NO VIDEO\n");
// Dump the audio properties, if any
if (vi.HasAudio()) {
printf("audio channels: %d\n", vi.nchannels);
printf("sample_type: %x\n", vi.sample_type);
printf("samples per second: %d\n", vi.audio_samples_per_second);
printf("bytes per channel sample: %d\n", vi.BytesPerChannelSample());
printf("bytes per audio sample: %d\n", vi.BytesPerAudioSample());
printf("num_audio_samples: %lld\n", vi.num_audio_samples);
}
else
printf("NO AUDIO\n");
printf("-----------\n\n");
// Duplicate detection needs frames to compare, so video is mandatory
if (!vi.HasVideo()) {
printf("Can't start video playback for the sequence without video.\n\n");
return -1;
}
printf("Starting playback ...\n");
// Prime the comparison with frame 0; each iteration below compares
// frame i against the previous frame's raw buffer
prev_frame = clip->GetFrame(0, env);
int framesize = prev_frame->GetFrameBuffer()->GetDataSize();
printf("INFO: framesize = %d bytes.\n\n", framesize);
// Critical section guards frame state shared with the Ctrl-C handler
InitializeCriticalSection(&cs);
SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE);
int error_count = 0;
int dup_start_frame = 0;
bool flag_dup = false;
std::vector<std::pair<int, int>> duplicates;
for(int i=1; i<vi.num_frames; ++i) {
EnterCriticalSection(&cs);
dst = clip->GetFrame(i, env);
const BYTE *src_ptr = prev_frame->GetFrameBuffer()->GetReadPtr();
//......... remainder of code omitted in this excerpt .........
示例4: if
/// VFAPI callback: report the properties of the video or audio stream of a
/// previously opened file handle. D2V files answer video queries from the
/// stored VideoInfo; everything else (including all audio queries) goes
/// through the wrapped AviSynth clip. Returns VF_ERROR on a bad output
/// struct, an unknown stream id, or an audio query on a stream without audio.
extern "C" HRESULT __stdcall get_stream_info(VF_FileHandle in, DWORD stream, void *out)
{
	vfMI *mi = (vfMI*)in;

	if (stream == VF_STREAM_VIDEO)
	{
		LPVF_StreamInfo_Video vinfo = (LPVF_StreamInfo_Video)out;
		// Reject a missing or wrongly-sized output structure
		if (vinfo == NULL || vinfo->dwSize != sizeof(VF_StreamInfo_Video))
			return VF_ERROR;
		if (mi->type == D2V_TYPE)
		{
			// D2V path: properties come straight from the stored VideoInfo
			vinfo->dwLengthL = mi->vi->num_frames;
			vinfo->dwRate = mi->vi->fps_denominator ? mi->vi->fps_numerator : 0;
			vinfo->dwScale = mi->vi->fps_denominator;
			vinfo->dwWidth = mi->vi->width;
			vinfo->dwHeight = mi->vi->height;
		}
		else
		{
			// AviSynth path: query the clip for its current VideoInfo
			const VideoInfo clip_vi = (*mi->clip)->GetVideoInfo();
			vinfo->dwLengthL = clip_vi.num_frames;
			vinfo->dwRate = clip_vi.fps_denominator ? clip_vi.fps_numerator : 0;
			vinfo->dwScale = clip_vi.fps_denominator;
			vinfo->dwWidth = clip_vi.width;
			vinfo->dwHeight = clip_vi.height;
		}
		vinfo->dwBitCount = 24;
		return VF_OK;
	}

	if (stream == VF_STREAM_AUDIO)
	{
		LPVF_StreamInfo_Audio ainfo = (LPVF_StreamInfo_Audio)out;
		// Reject a missing or wrongly-sized output structure
		if (ainfo == NULL || ainfo->dwSize != sizeof(VF_StreamInfo_Audio))
			return VF_ERROR;
		// D2V handles carry no audio stream
		if (mi->type == D2V_TYPE)
			return VF_ERROR;
		const VideoInfo clip_vi = (*mi->clip)->GetVideoInfo();
		if (!clip_vi.HasAudio())
			return VF_ERROR;
		// dwRate/dwScale express the byte rate so that rate/scale yields
		// samples per second
		ainfo->dwLengthL = (unsigned long)clip_vi.num_audio_samples;
		ainfo->dwChannels = clip_vi.nchannels;
		ainfo->dwRate = clip_vi.audio_samples_per_second * clip_vi.BytesPerAudioSample();
		ainfo->dwScale = clip_vi.BytesPerAudioSample();
		ainfo->dwBitsPerSample = clip_vi.BytesPerChannelSample()*8;
		ainfo->dwBlockAlign = clip_vi.BytesPerAudioSample();
		return VF_OK;
	}

	// Unknown stream identifier
	return VF_ERROR;
}