本文整理汇总了C++中PClip::GetFrame方法的典型用法代码示例。如果您正苦于以下问题:C++ PClip::GetFrame方法的具体用法?C++ PClip::GetFrame怎么用?C++ PClip::GetFrame使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类PClip的用法示例。
在下文中一共展示了PClip::GetFrame方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
int main() {
try {
cout << "Creating script environment 1..." << endl;
IScriptEnvironment* env1 = CreateScriptEnvironment(3);
cout << "Creating script environment 2..." << endl;
IScriptEnvironment* env2 = CreateScriptEnvironment(3);
cout << "Deleting script environment 1..." << endl;
delete env1;
cout << "Invoking BlankClip on env 2..." << endl;
AVSValue ret = env2->Invoke("BlankClip", AVSValue(), 0);
PClip clp = ret.AsClip();
cout << "Reading frame 0 from env2..." << endl;
PVideoFrame frm = clp->GetFrame(0, env2);
} catch (AvisynthError &e) {
cerr << "AvisynthError: " << e.msg << endl;
return -1;
} catch (...) {
cerr << "unknown error" << endl;
return -1;
}
return 0;
}
示例2: GetFrame
// Produce the shape mask for frame n: the source is converted to gray scale,
// run through process_frame(), and the resulting single-channel mask is
// copied back into a destination frame in the clip's original colour format.
PVideoFrame __stdcall ShapeMask::GetFrame(int n, IScriptEnvironment* env) {
	int colorspace;
	if (vi.IsRGB24())      colorspace = RGB24;
	else if (vi.IsRGB32()) colorspace = RGB32;
	else if (vi.IsYUY2())  colorspace = YUV2;
	else if (vi.IsYV12())  colorspace = YV12;
	else raiseError(env, "Unsupported color space, must be one of RGB24, RGB32, YUV2 or YV12");

	PClip grayClip = toGrayScale(env, child);
	PVideoFrame src = grayClip->GetFrame(n, env);
	PVideoFrame dst = env->NewVideoFrame(vi);

	const uchar* grayPixels = src->GetReadPtr();
	const int grayPitch = src->GetPitch();
	const int bitsPerPixel = vi.BitsPerPixel();

	// process_frame() leaves the source pixels untouched, so casting away
	// const here is safe.  It returns a newly allocated one-channel gray
	// mask that must be converted back to the source clip's format.
	uchar* mask = process_frame((uchar*)grayPixels, vi.width, vi.height,
	                            grayPitch, colorspace, threshold, minarea, rectonly);

	if (vi.IsPlanar())    copyPlanar(mask, dst, bitsPerPixel);
	else if (vi.IsYUY2()) copyYUY2(mask, dst);
	else                  copyRGB(mask, dst, bitsPerPixel);

	// NOTE(review): if process_frame() allocates the buffer with new[],
	// this should be delete[] -- confirm against its implementation.
	delete mask;
	return dst;
}
示例3: GetFrame
PVideoFrame __stdcall SimpleSample::GetFrame(int n, IScriptEnvironment* env) {
// This is the implementation of the GetFrame function.
// See the header definition for further info.
PVideoFrame src = child->GetFrame(n, env);
// Request frame 'n' from the child (source) clip.
PVideoFrame window = WindowVideo->GetFrame(n, env);
// Request frame "'n" from the WindowVideo clip
PVideoFrame dst = env->NewVideoFrame(vi);
// Construct a frame based on the information of the current frame
// contained in the "vi" struct.
/* GstAVSynth: copy timestamp from source to destination buffer
* without modifying it
*/
dst->SetTimestamp (src->GetTimestamp ());
const unsigned char* srcp = src->GetReadPtr();
// Request a Read pointer from the source frame.
// This will return the position of the upperleft pixel in YUY2 images,
// and return the lower-left pixel in RGB.
// RGB images are stored upside-down in memory.
// You should still process images from line 0 to height.
unsigned char* dstp = dst->GetWritePtr();
// Request a Write pointer from the newly created destination image.
// You can request a writepointer to images that have just been
// created by NewVideoFrame. If you recieve a frame from PClip->GetFrame(...)
// you must call env->MakeWritable(&frame) be recieve a valid write pointer.
const int dst_pitch = dst->GetPitch();
// Requests pitch (length of a line) of the destination image.
// For more information on pitch see: http://www.avisynth.org/index.php?page=WorkingWithImages
// (short version - pitch is always equal to or greater than width to allow for seriously fast assembly code)
const int dst_width = dst->GetRowSize();
// Requests rowsize (number of used bytes in a line.
// See the link above for more information.
const int dst_height = dst->GetHeight();
// Requests the height of the destination image.
const int src_pitch = src->GetPitch();
const int src_width = src->GetRowSize();
const int src_height = src->GetHeight();
const unsigned char* windowp=window->GetReadPtr();
const int window_pitch = window->GetPitch();
const int window_width = window->GetRowSize();
const int window_height = window->GetHeight();
// Get info on the Windowed Clip (see src definitions for more information)
int w, h;
// This version of SimpleSample is intended to show how to utilise information from 2 clips in YUY2
// colourspace only. The original V1.6 code has been left in place fro all other
// colourspaces.
// It is designed purely for clarity and not as good or clever code :-)
if (vi.IsRGB24()) {
// The code just deals with RGB24 colourspace where each pixel is represented by
// 3 bytes, Blue, Green and Red.
// Although this colourspace is the easiest to understand, it is very rarely used because
// a 3 byte sequence (24bits) cannot be processed easily using normal 32 bit registers.
/*
for (h=0; h < src_height;h++) { // Loop from bottom line to top line.
for (w = 0; w < src_width; w+=3) { // Loop from left side of the image to the right side 1 pixel (3 bytes) at a time
// stepping 3 bytes (a pixel width in RGB24 space)
*(dstp + w) = *(srcp + w); // Copy each Blue byte from source to destination.
*(dstp + w + 1) = *(srcp + w + 1); // Copy Green.
*(dstp + w + 2) = *(srcp + w + 2); // Copy Red
}
srcp = srcp + src_pitch; // Add the pitch (note use of pitch and not width) of one line (in bytes) to the source pointer
dstp = dstp + dst_pitch; // Add the pitch to the destination pointer.
}
*/
env->BitBlt(dst->GetWritePtr(), dst->GetPitch(), src->GetReadPtr(), src->GetPitch(), src->GetRowSize(), src->GetHeight());
// end copy src to dst
//Now draw a white square in the middle of the frame
// Normally you'd do this code within the loop above but here it is in a separate loop for clarity;
dstp = dst->GetWritePtr(); // reset the destination pointer to the bottom, left pixel. (RGB colourspaces only)
dstp = dstp + (dst_height/2 - SquareSize/2)*dst_pitch; // move pointer to SquareSize/2 lines from the middle of the frame;
for (h=0; h < SquareSize;h++) { // only scan 100 lines
for (w = dst_width/2 - SquareSize*3/2; w < dst_width/2 + SquareSize*3/2; w+=3) { // only scans the middle SquareSize pixels of a line
*(dstp + w) = 255; // Set Blue to maximum value.
*(dstp + w + 1) = 255; // and Green.
*(dstp + w + 2) = 255; // and Red - therefore the whole pixel is now white.
}
dstp = dstp + dst_pitch;
}
}
if (vi.IsRGB32()) {
// This code deals with RGB32 colourspace where each pixel is represented by
// 4 bytes, Blue, Green and Red and "spare" byte that could/should be used for alpha
// keying but usually isn't.
//.........这里部分代码省略.........
示例4: OutputVideo
/*
 * Decode every frame of the clip and stream the raw frame data either to an
 * mplayer child process (when launchMPlayer is set) or to stdout.
 *
 * SIGPIPE is ignored for the duration of the call so that a consumer that
 * exits early shows up as ferror()/feof() on the pipe instead of killing this
 * process; the previous handler is restored on every exit path.
 *
 * Returns 0 on success, -1 on failure.
 */
int AvxContext::OutputVideo() {
	FILE *sink;
	unsigned char *writeBuffer = NULL;
	sighandler_t old_sigpipe = signal(SIGPIPE, SIG_IGN);

	if (launchMPlayer) {
		char command[1024];
		if (MPlayerCommandVideo(command))
			return -1;
		AVXLOG_INFO("MPlayer command line: %s", command);
		sink = popen(command, "w");
		if (!sink) {
			AVXLOG_ERROR("%s", "Error starting mplayer");
			return -1;
		}
	} else {
		sink = stdout;
	}

	// Scratch buffer for one frame of packed data.  For planar formats each
	// plane is staged through the front of this buffer in turn, and the Y
	// plane (RowSize() * height) is the largest single write.
	writeBuffer = (unsigned char *)malloc(vi.RowSize() * vi.height);
	if (!writeBuffer) {
		AVXLOG_ERROR("%s", "Unable to allocate memory");
		goto fail;
	}

	try {
		for (int i = 0; i < vi.num_frames; ++i) {
			// Stop early if the mplayer process went away (write errors on
			// the pipe surface here because SIGPIPE is ignored).
			if (launchMPlayer && (feof(sink) || ferror(sink))) {
				AVXLOG_ERROR("%s", "mplayer process exited");
				break;
			}
			PVideoFrame frame = clip->GetFrame(i, avx_library.env);
			if (vi.IsPlanar()) { // Check plane count in 2.6.
				// Planes are emitted in Y, V, U order.
				int planes[] = {PLANAR_Y, PLANAR_V, PLANAR_U};
				for (int j = 0; j < 3; ++j) {
					int plane = planes[j];
					int src_pitch = frame->GetPitch(plane);
					int row_size = frame->GetRowSize(plane);
					int height = frame->GetHeight(plane);
					const unsigned char *srcp = frame->GetReadPtr(plane);
					// BitBlt with dst pitch == row_size strips the pitch
					// padding so the plane is written densely packed.
					avx_library.env->BitBlt(writeBuffer, row_size, srcp, src_pitch, row_size, height);
					fwrite(writeBuffer, 1, row_size * height, sink);
				}
			} else {
				int src_pitch = frame->GetPitch();
				int row_size = frame->GetRowSize();
				int height = frame->GetHeight();
				const unsigned char *srcp = frame->GetReadPtr();
				avx_library.env->BitBlt(writeBuffer, row_size, srcp, src_pitch, row_size, height);
				fwrite(writeBuffer, 1, row_size * height, sink);
			}
		}
	} catch (AvisynthError &e) {
		AVXLOG_ERROR("AvisynthError: %s", e.msg);
		goto fail;
	}

	free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return 0;

fail:
	if (writeBuffer)
		free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return -1;
}
示例5: main
int main(int argc, TCHAR* argv[]) {
SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED);
printf("Usage: filmtester <avs filename> [duplicates_maxlength=2]\n");
printf("The program plays the AVS file and tests for frame duplicates\n\n");
int duplicates_maxlength = 2;
if (argc < 2) {
printf("No filename specified.\n\n");
return -1;
}
if (argc > 2) {
duplicates_maxlength = _ttoi(argv[2]);
printf("INFO: duplicates_maxlength set to %d\n", duplicates_maxlength);
}
IScriptEnvironment *env = CreateScriptEnvironment();
_tprintf(_T("Loading \"%s\" ...\n"), argv[1]);
LPCSTR arg_names[1] = { nullptr };
AVSValue arg_vals[1] = { (LPCSTR)argv[1] };
clip = env->Invoke("import", AVSValue(arg_vals,1), arg_names).AsClip();
printf("AVS file loaded successfully.\n\n");
VideoInfo vi = clip->GetVideoInfo();
printf("VideoInfo:\n");
printf("-----------\n");
if (vi.HasVideo()) {
printf("width x height: %dx%d\n", vi.width, vi.height);
printf("num_frames: %d\n", vi.num_frames);
printf("fps: %d/%d\n", vi.fps_numerator, vi.fps_denominator);
std::string colorspace;
if (vi.pixel_type & VideoInfo::CS_BGR) colorspace += "BGR, ";
if (vi.pixel_type & VideoInfo::CS_YUV) colorspace += "YUV, ";
if (vi.pixel_type & VideoInfo::CS_INTERLEAVED) colorspace += "INTERLEAVED, ";
if (vi.pixel_type & VideoInfo::CS_PLANAR) colorspace += "PLANAR, ";
if (colorspace.length() > 0) colorspace.erase(colorspace.length()-2);
printf("colorspace: %s\n", colorspace.c_str());
std::string colorformat;
if (vi.pixel_type & VideoInfo::CS_BGR24) colorformat += "BGR24, ";
if (vi.pixel_type & VideoInfo::CS_BGR32) colorformat += "BGR32, ";
if (vi.pixel_type & VideoInfo::CS_YUY2) colorformat += "YUY2, ";
if (vi.pixel_type & VideoInfo::CS_YV12) colorformat += "YV12, ";
if (vi.pixel_type & VideoInfo::CS_I420) colorformat += "I420 (IYUV), ";
if (colorformat.length() > 0)
colorformat.erase(colorformat.length()-2);
else
colorformat = "UNKNOWN";
printf("colorformat: %s\n", colorformat.c_str());
std::string imagetype;
if (vi.image_type & VideoInfo::IT_BFF) imagetype += "BFF, ";
if (vi.image_type & VideoInfo::IT_TFF) imagetype += "TFF, ";
if (vi.image_type & VideoInfo::IT_FIELDBASED) imagetype += "FIELDBASED, ";
if (imagetype.length() > 0)
imagetype.erase(imagetype.length()-2);
else
imagetype = "UNKNOWN";
printf("image_type: %s\n", imagetype.c_str());
printf("bits per pixel: %d\n", vi.BitsPerPixel());
}
else
printf("NO VIDEO\n");
if (vi.HasAudio()) {
printf("audio channels: %d\n", vi.nchannels);
printf("sample_type: %x\n", vi.sample_type);
printf("samples per second: %d\n", vi.audio_samples_per_second);
printf("bytes per channel sample: %d\n", vi.BytesPerChannelSample());
printf("bytes per audio sample: %d\n", vi.BytesPerAudioSample());
printf("num_audio_samples: %lld\n", vi.num_audio_samples);
}
else
printf("NO AUDIO\n");
printf("-----------\n\n");
if (!vi.HasVideo()) {
printf("Can't start video playback for the sequence without video.\n\n");
return -1;
}
printf("Starting playback ...\n");
prev_frame = clip->GetFrame(0, env);
int framesize = prev_frame->GetFrameBuffer()->GetDataSize();
printf("INFO: framesize = %d bytes.\n\n", framesize);
InitializeCriticalSection(&cs);
SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE);
int error_count = 0;
int dup_start_frame = 0;
bool flag_dup = false;
std::vector<std::pair<int, int>> duplicates;
for(int i=1; i<vi.num_frames; ++i) {
EnterCriticalSection(&cs);
dst = clip->GetFrame(i, env);
const BYTE *src_ptr = prev_frame->GetFrameBuffer()->GetReadPtr();
//.........这里部分代码省略.........