This page collects typical usage examples of the C++ method PClip::GetVideoInfo. If you are unsure what PClip::GetVideoInfo does, how to call it, or where to find working code, the curated examples below should help. You can also browse further usage examples of the containing class, PClip.
15 code examples of PClip::GetVideoInfo are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ samples.
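Before diving into the examples, a minimal sketch of the basic pattern may be useful: GetVideoInfo() returns a VideoInfo structure describing the clip, which a filter typically inspects in its constructor or factory function before doing any work. The filter name MyFilter and its specific checks are illustrative only, not taken from the examples below.

#include "avisynth.h"

// MyFilter is a hypothetical filter, shown only to illustrate the pattern.
class MyFilter : public GenericVideoFilter {
public:
    MyFilter(PClip child, IScriptEnvironment* env) : GenericVideoFilter(child) {
        // GetVideoInfo() returns the same data as the inherited 'vi' member.
        const VideoInfo& info = child->GetVideoInfo();
        if (!info.HasVideo())
            env->ThrowError("MyFilter: input clip has no video.");
        if (!info.IsYV12())
            env->ThrowError("MyFilter: only YV12 input is supported.");
    }
};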
Example 1: Create_SoundTouch
AVSValue __cdecl Create_SoundTouch(AVSValue args, void*, IScriptEnvironment* env) {
    try { // HIDE DAMN SEH COMPILER BUG!!!
        PClip clip = args[0].AsClip();
        const VideoInfo& vi = clip->GetVideoInfo();
        if (!vi.HasAudio())
            env->ThrowError("Input clip does not have audio.");
        if (!(vi.SampleType() & SAMPLE_FLOAT))
            env->ThrowError("Input audio sample format to TimeStretch must be float.");
        if (vi.AudioChannels() == 2) {
            return new AVSStereoSoundTouch(clip,
                (float)args[1].AsFloat(100.0),
                (float)args[2].AsFloat(100.0),
                (float)args[3].AsFloat(100.0),
                &args[4],
                env);
        }
        return new AVSsoundtouch(clip,
            (float)args[1].AsFloat(100.0),
            (float)args[2].AsFloat(100.0),
            (float)args[3].AsFloat(100.0),
            &args[4],
            env);
    }
    catch (...) { throw; }
}
Example 2: GenericVideoFilter
FTurn::FTurn(PClip child, TurnDirection direction, bool chroma, bool mt, IScriptEnvironment* env)
    : GenericVideoFilter(child), chroma_(chroma), mt_(mt) {
    if (!isSupportedColorspace(vi.pixel_type)) {
        env->ThrowError(getUnsupportedColorspaceMessage());
    }
    if (!(env->GetCPUFlags() & CPUF_SSE2)) {
        env->ThrowError("Sorry, at least SSE2 is required");
    }

    int CPUInfo[4]; // eax, ebx, ecx, edx
    __cpuid(CPUInfo, 1);
#pragma warning(disable: 4800)
    bool ssse3 = CPUInfo[2] & 0x00000200; // CPUID leaf 1, ECX bit 9: SSSE3 support
#pragma warning(default: 4800)

    if (direction == TurnDirection::RIGHT || direction == TurnDirection::LEFT) {
        // Rotating by 90 degrees swaps the output dimensions.
        vi.width = child->GetVideoInfo().height;
        vi.height = child->GetVideoInfo().width;
        if (direction == TurnDirection::LEFT) {
            turnFunction_ = turnPlaneLeft;
        } else {
            turnFunction_ = ssse3 ? turnPlaneRightSSSE3 : turnPlaneRightSSE2;
        }
    } else {
        turnFunction_ = ssse3 ? turnPlane180SSSE3 : turnPlane180SSE2;
    }
}
Example 3: SupportFilter
Binary::Binary(BinaryComputePlane *_computeplane, PClip _child, PClip _secondclip, AVSValue _chroma, IScriptEnvironment *env)
    : SupportFilter(_child, env), secondclip(_secondclip),
      computeplane(*_computeplane),
      computechroma( *(decipherchromaargument(_chroma, makevector(_child, _secondclip), *_computeplane)) )
{
    if (_secondclip->GetVideoInfo().width != vi.width)
        env->ThrowError("LimitedSupport binary filter: widths do not match.");
    if (_secondclip->GetVideoInfo().height != vi.height)
        env->ThrowError("LimitedSupport binary filter: heights do not match.");
}
Example 4: GenericVideoFilter
TMaskCleaner::TMaskCleaner(PClip child, int length, int thresh, IScriptEnvironment* env)
    : GenericVideoFilter(child), m_length(length), m_thresh(thresh), lookup(nullptr) {
    if (!child->GetVideoInfo().IsYV12()) {
        env->ThrowError("Only YV12 is supported!"); // message matches the IsYV12 check above
    }
    if (length <= 0 || thresh <= 0) {
        env->ThrowError("Invalid arguments!");
    }
    // One bit per pixel of the frame.
    lookup = new BYTE[child->GetVideoInfo().height * child->GetVideoInfo().width / 8];
    m_w = child->GetVideoInfo().width;
}
Example 5: CreateAreaResize
AVSValue __cdecl CreateAreaResize(AVSValue args, void* user_data, IScriptEnvironment* env)
{
    PClip clip = args[0].AsClip();
    int target_width = args[1].AsInt();
    int target_height = args[2].AsInt();

    if (target_width < 1 || target_height < 1) {
        env->ThrowError("AreaResize: target width/height must be 1 or higher.");
    }

    const VideoInfo& vi = clip->GetVideoInfo();
    if (vi.IsYUY2()) {
        env->ThrowError("AreaResize: unsupported colorspace (YUY2).");
    }
    if (vi.IsYV411() && (target_width & 3)) {
        env->ThrowError("AreaResize: target width must be mod 4.");
    }
    if ((vi.IsYV16() || vi.IsYV12()) && (target_width & 1)) {
        env->ThrowError("AreaResize: target width must be mod 2.");
    }
    if (vi.IsYV12() && (target_height & 1)) {
        env->ThrowError("AreaResize: target height must be mod 2.");
    }
    if (vi.width < target_width || vi.height < target_height) {
        env->ThrowError("AreaResize: this filter is for downscaling only.");
    }

    return new AreaResize(clip, target_width, target_height, env);
}
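The mod-4 and mod-2 checks in this example follow directly from chroma subsampling: YV411 stores one chroma sample per four luma pixels horizontally, YV16 and YV12 one per two, and YV12 additionally halves chroma vertically. As a stand-alone illustration of the same rule, here is a minimal sketch; RequireMod is an invented helper, not part of the AreaResize plugin.

// Sketch: the subsampling rule behind the mod checks (hypothetical helper).
static void RequireMod(int value, int mod, const char* what, IScriptEnvironment* env)
{
    if (value % mod != 0)
        env->ThrowError("AreaResize: %s must be a multiple of %d.", what, mod);
}
// Possible usage, mirroring the checks above:
//   if (vi.IsYV411()) RequireMod(target_width, 4, "target width", env);
//   if (vi.IsYV16() || vi.IsYV12()) RequireMod(target_width, 2, "target width", env);
//   if (vi.IsYV12()) RequireMod(target_height, 2, "target height", env);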
Example 6: GenericVideoFilter
FilteredResizeH::FilteredResizeH( PClip _child, double subrange_left, double subrange_width,
                                  int target_width, ResamplingFunction* func, IScriptEnvironment* env )
    : GenericVideoFilter(_child), tempY(0), tempUV(0), pattern_luma(0), pattern_chroma(0),
      CodeGenerator(false) // Josh: CodeGenerator constructor takes x64 arg
{
try { // HIDE DAMN SEH COMPILER BUG!!!
    pattern_luma = pattern_chroma = (int *)0;
    tempUV = tempY = 0;
    original_width = _child->GetVideoInfo().width;

    if (target_width <= 0)
        env->ThrowError("Resize: Width must be greater than 0.");

    if (vi.IsYUV())
    {
        if ((target_width & 1) && vi.IsYUY2())
            env->ThrowError("Resize: YUY2 width must be even");
        if ((target_width & 1) && vi.IsYV12())
            env->ThrowError("Resize: YV12 width must be even.");

        tempY = (BYTE*) _aligned_malloc(original_width*2+4+32, 64);  // aligned for Athlon cache line
        tempUV = (BYTE*) _aligned_malloc(original_width*4+8+32, 64); // aligned for Athlon cache line

        if (vi.IsYV12()) {
            pattern_chroma = GetResamplingPatternYUV( vi.width>>1, subrange_left/2.0, subrange_width/2.0,
                                                      target_width>>1, func, true, tempY, env );
        } else {
            pattern_chroma = GetResamplingPatternYUV( vi.width>>1, subrange_left/2.0, subrange_width/2.0,
                                                      target_width>>1, func, false, tempUV, env );
        }
        pattern_luma = GetResamplingPatternYUV(vi.width, subrange_left, subrange_width, target_width, func, true, tempY, env);
    }
    // ... (the RGB path and the rest of the constructor are truncated in this excerpt)
Example 7: Create
PClip AlignPlanar::Create(PClip clip)
{
    if (!clip->GetVideoInfo().IsPlanar()) { // not planar: nothing to align, pass through
        return clip;
    }
    return new AlignPlanar(clip);
}
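AlignPlanar::Create shows a common AviSynth idiom: query GetVideoInfo() first and return the input clip untouched whenever no wrapping is needed, keeping the filter chain as short as possible. The sketch below applies the same idiom to a hypothetical EnsureEvenWidth helper; the Crop call is only one plausible way to implement the fix-up.

// Sketch of the conditional-wrap idiom (EnsureEvenWidth is hypothetical).
PClip EnsureEvenWidth(PClip clip, IScriptEnvironment* env)
{
    const VideoInfo& info = clip->GetVideoInfo();
    if (info.width % 2 == 0)
        return clip; // property already holds: pass the clip through unchanged
    AVSValue cropArgs[5] = { clip, 0, 0, info.width - 1, info.height };
    return env->Invoke("Crop", AVSValue(cropArgs, 5)).AsClip();
}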
Example 8: MVDegrainBase
MVDegrainMulti::MVDegrainMulti(PClip _child, PClip mvMulti, int _RefFrames, int _thSAD, int _thSADC, int _YUVplanes, int _nLimit,
                               PClip _pelclip, int _nIdx, int _nSCD1, int _nSCD2, bool _mmx, bool _isse, int _MaxThreads,
                               int _PreFetch, int _SadMode, IScriptEnvironment* env) :
    MVDegrainBase(_child, _RefFrames, _YUVplanes, _nLimit, _pelclip, _nIdx, _mmx, _isse, env, mvMulti,
                  "MVDegrainMulti", 0, _MaxThreads, _PreFetch, _SadMode), RefFrames(_RefFrames)
{
    if (RefFrames < 1 || RefFrames > 32) env->ThrowError("MVDegrainMulti: refframes must be >=1 and <=32");

    // get the true number of reference frames
    VideoInfo mvMultivi = mvMulti->GetVideoInfo();
    unsigned int RefFramesAvailable = mvMultivi.height / 2;

    // if refframes is greater than the MVAnalyseMulti height, limit it to that height
    if (RefFramesAvailable < RefFrames) {
        RefFrames = RefFramesAvailable;
        UpdateNumRefFrames(RefFrames, env);
    }

    // PreFetch max 21, since 21*3=63 and 64 is the maximum number of threads at one time
    if (_PreFetch < 1 || _PreFetch > 21) env->ThrowError("MVDegrainMulti: PreFetch must be >=1 and <=21");
    if (_PreFetch * RefFrames > 32) env->ThrowError("MVDegrainMulti: PreFetch*RefFrames<=32");

    // initialize the MVClips, which are ordered BX, ..., B3, B2, B1, F1, F2, F3, ..., FX in mvMulti
    for (unsigned int PreFetchNum = 0; PreFetchNum < static_cast<unsigned int>(_PreFetch); ++PreFetchNum) {
        if (RefFrames < RefFramesAvailable) {
            // we are taking a subset of the mvMulti clip
            for (unsigned int RefNum = 0; RefNum < RefFrames; ++RefNum) {
                pmvClipF[PreFetchNum][RefNum] = new MVClip(mvMulti, _nSCD1, _nSCD2, env, true, RefFramesAvailable + RefNum);
                pmvClipB[PreFetchNum][RefNum] = new MVClip(mvMulti, _nSCD1, _nSCD2, env, true, RefFramesAvailable - RefNum - 1);
            }
        }
        else {
            // we are taking the full mvMulti clip
            for (unsigned int RefNum = 0; RefNum < RefFrames; ++RefNum) {
                pmvClipF[PreFetchNum][RefNum] = new MVClip(mvMulti, _nSCD1, _nSCD2, env, true, RefFrames + RefNum);
                pmvClipB[PreFetchNum][RefNum] = new MVClip(mvMulti, _nSCD1, _nSCD2, env, true, RefFrames - RefNum - 1);
            }
        }
    }

    // check similarities
    CheckSimilarity(*pmvClipF[0][0], "mvMulti", env); // only need to check one since they are grouped together

    // normalize thSAD
    thSAD  = _thSAD  * pmvClipB[0][0]->GetThSCD1() / _nSCD1; // normalize to block SAD
    thSADC = _thSADC * pmvClipB[0][0]->GetThSCD1() / _nSCD1; // chroma

    // find the maximum extent
    unsigned int MaxDelta = static_cast<unsigned int>(pmvClipF[0][RefFrames-1]->GetDeltaFrame());
    if (static_cast<unsigned int>(pmvClipB[0][RefFrames-1]->GetDeltaFrame()) > MaxDelta)
        MaxDelta = static_cast<unsigned int>(pmvClipB[0][RefFrames-1]->GetDeltaFrame());

    // numframes 2*MaxDelta+1, i.e. to cover all possible frames in the sliding window
    mvCore->AddFrames(nIdx, (2*MaxDelta)*_PreFetch+1, pmvClipB[0][0]->GetLevelCount(), nWidth, nHeight, nPel, nHPadding, nVPadding,
                      YUVPLANES, _isse, yRatioUV);
}
Example 9:
AdjustedColorDifference::AdjustedColorDifference(IScriptEnvironment* env, PClip input, double factor, PClip subtrahend /* optional */) :
    GenericVideoFilter(input), m_factor(factor), m_subtrahend(subtrahend)
{
    if (!vi.IsRGB32())
        env->ThrowError("plugin supports only RGB32 input");
    if (subtrahend != nullptr) {
        auto svi = subtrahend->GetVideoInfo();
        CheckVideoInfo(env, vi, svi);
    }
}
Example 10: ColorQuantize
ColorQuantize(PClip originClip, int paletteSize,
              bool useGlobalPalette, FREE_IMAGE_QUANTIZE algorithm,
              const char *globalPaletteOutputFile, IScriptEnvironment* env)
    : m_origin(originClip)
    , m_paletteSize(paletteSize)
    , m_useGlobalPalette(useGlobalPalette)
    , m_algorithm(algorithm)
    , m_targetVideoInfo(originClip->GetVideoInfo())
    , m_globalPalette(0)
{
    if (!originClip->GetVideoInfo().IsRGB24()) {
        m_originRgb = env->Invoke("ConvertToRGB24", originClip).AsClip();
        m_targetVideoInfo.pixel_type = VideoInfo::CS_BGR24;
    } else {
        m_originRgb = originClip;
    }
    if (m_useGlobalPalette) {
        // Stack every frame into one tall image so the palette is computed globally.
        FIBITMAP *hugeImage =
            FreeImage_Allocate(m_targetVideoInfo.width,
                               m_targetVideoInfo.height * m_targetVideoInfo.num_frames,
                               24);
        for (int frame = 0; frame < m_targetVideoInfo.num_frames; ++frame) {
            const PVideoFrame videoFrame = m_originRgb->GetFrame(frame, env);
            copyVideoFrameToImage(videoFrame, hugeImage, frame * m_targetVideoInfo.height);
        }
        FIBITMAP *quantizedImage =
            FreeImage_ColorQuantizeEx(hugeImage, algorithm, m_paletteSize);
        FreeImage_Unload(hugeImage);
        m_globalPalette = new RGBQUAD[m_paletteSize];
        memcpy(m_globalPalette, FreeImage_GetPalette(quantizedImage), m_paletteSize * sizeof(RGBQUAD));
        FreeImage_Unload(quantizedImage);
        if (globalPaletteOutputFile)
            savePaletteImage(globalPaletteOutputFile, m_globalPalette, m_paletteSize);
    }
}
Example 11: OpenFile
int AvxContext::OpenFile() {
    try {
        AVSValue ret = avx_library.env->Invoke("Import", scriptName);
        if (!ret.IsClip()) {
            AVXLOG_ERROR("%s", "Script did not return a clip");
            return -1;
        }
        clip = ret.AsClip();
        vi = clip->GetVideoInfo();
    } catch (AvisynthError &e) {
        AVXLOG_ERROR("AvisynthError: %s", e.msg);
        return -1;
    }
    return 0;
}
Example 12: caudio_type
audio_type& audio(void) {
    const VideoInfo& vi = mv_clip->GetVideoInfo();
    const caudio_type::info_type info = {
        vi.HasAudio(),
        vi.AudioChannels(),
        caudio_type::bit_depth(vi.sample_type),
        vi.sample_type != SAMPLE_FLOAT,       // true for integer samples
        static_cast<double>(vi.num_audio_samples)
            / vi.SamplesPerSecond(),          // duration in seconds
        vi.SamplesPerSecond(),
        vi.num_audio_samples,
        vi.BytesPerAudioSample()
    };
    if (mv_audio == NULL) mv_audio =
        new caudio_type(mv_clip, mv_se.get(), info);
    return *mv_audio;
}
Example 13: CreateElements
AVSValue __cdecl StillImage::CreateElements(AVSValue args, void* user_data, IScriptEnvironment* env)
{
    Q_UNUSED(user_data)
    const PClip background = args[0].AsClip();
    const VideoInfo backgroundVI = background->GetVideoInfo();
    const AVSValue &elementValues = args[1];
    QStringList elements;
    for (int i = 0; i < elementValues.ArraySize(); ++i) {
        const QLatin1String element(elementValues[i].AsString());
        if (!Filters::elementAvailable(element)) // reject unknown element names
            env->ThrowError("QtAviSynthElements: Invalid element '%s'.", element.latin1());
        elements.append(element);
    }
    QImage image(backgroundVI.width, backgroundVI.height, QImage::Format_ARGB32);
    image.fill(Tools::transparentColor);
    QPainter p(&image);
    Filters::paintElements(&p, elements, image.rect());
    const PClip elementsClip = new StillImage(backgroundVI, image, env);
    return new RgbOverlay(background, elementsClip, env);
}
Example 14: Create_AutoTrace
AVSValue __cdecl Create_AutoTrace(AVSValue args, void* user_data, IScriptEnvironment* env) {
    PClip clip = args[0].AsClip();
    const VideoInfo& vi = clip->GetVideoInfo();
    if (vi.IsRGB24()) {
        at_fitting_opts_type* fitting_opts = at_fitting_opts_new();
        // Set fitting opts based on input
        fitting_opts->color_count = args[3].AsInt(0);
        int destWidth = args[1].AsInt(0);
        int destHeight = args[2].AsInt(0);
        // If the inputs are left off entirely (or 0 or negative), then use the
        // input size. If either one is left off (or 0 or negative), then
        // determine that one based on preserving the aspect ratio of the
        // given value.
        if (destWidth <= 0) {
            if (destHeight <= 0) {
                destWidth = vi.width;
                destHeight = vi.height;
            } else {
                // Calculate width based off desired height
                destWidth = destHeight * vi.width / vi.height;
            }
        } else if (destHeight <= 0) {
            // Calculate height based off desired width
            destHeight = destWidth * vi.height / vi.width;
        }
        if (args[4].Defined()) {
            // background_color
            int background = args[4].AsInt();
            if (background != -1) {
                // To match the documentation, ignore -1, even though it would
                // be a valid color. (And arguably makes more sense than
                // 0xFFFFFF, as it has the alpha channel set to full.)
                // Note that R and B are swapped. This is by design - rather
                // than convert a BGR image into an RGB image as AutoTrace
                // expects, we just let the B and R channels be "backwards"
                // within AutoTrace.
                fitting_opts->background_color = at_color_new(
                    (background & 0x0000FF),
                    (background & 0x00FF00) >> 8,
                    (background & 0xFF0000) >> 16);
            }
            // ... (the remainder of this function is truncated in this excerpt)
Example 15: cvideo_type
/*
 * A cavs_type object owns mv_se; the cvideo_type and caudio_type
 * objects are only allowed to borrow it.
 */
video_type& video(void) {
    const VideoInfo& vi = mv_clip->GetVideoInfo();
    const cvideo_type::info_type info = {
        vi.HasVideo(),
        vi.width,
        vi.height,
        static_cast<double>(vi.num_frames) * vi.fps_denominator
            / vi.fps_numerator,                                     // duration in seconds
        static_cast<double>(vi.fps_numerator) / vi.fps_denominator, // frames per second
        vi.fps_numerator,
        vi.fps_denominator,
        vi.num_frames,
        cvideo_type::fourcc(vi.pixel_type),
        vi.BitsPerPixel(),
        vi.IsFieldBased(),
        vi.IsTFF()
    };
    if (mv_video == NULL) mv_video =
        new cvideo_type(mv_clip, mv_se.get(), info);
    return *mv_video;
}
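Most of the factory functions shown above (Create_SoundTouch, CreateAreaResize, Create_AutoTrace) are meant to be registered with the script environment in the plugin's entry point. The sketch below shows that registration for CreateAreaResize; the parameter string "cii" (clip, int, int) is inferred from the example's arguments, not copied from the actual plugin source.

// Sketch: registering a factory function in an AviSynth 2.5-style plugin.
extern "C" __declspec(dllexport) const char* __stdcall
AvisynthPluginInit2(IScriptEnvironment* env)
{
    // "cii" = clip, target_width, target_height, as consumed by CreateAreaResize.
    env->AddFunction("AreaResize", "cii", CreateAreaResize, 0);
    return "AreaResize plugin";
}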