本文整理汇总了C++中CompositorContext::getFramenumber方法的典型用法代码示例。如果您正苦于以下问题:C++ CompositorContext::getFramenumber方法的具体用法?C++ CompositorContext::getFramenumber怎么用?C++ CompositorContext::getFramenumber使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类CompositorContext
的用法示例。
在下文中一共展示了CompositorContext::getFramenumber方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: convertToOperations
void TimeNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
  /* Convert the Time node into a single SetValueOperation whose value is the
   * curve-mapped progress of the current frame between the node's start
   * (custom1) and end (custom2) frames, clamped to [0, 1]. */
  SetValueOperation *operation = new SetValueOperation();
  bNode *node = this->getbNode();
  /* stack order output: fac */
  float fac = 0.0f;
  const int framenumber = context.getFramenumber();
  if (framenumber < node->custom1) {
    fac = 0.0f;
  }
  else if (framenumber > node->custom2) {
    fac = 1.0f;
  }
  else if (node->custom1 < node->custom2) {
    /* Use the cached frame number instead of querying the context a second
     * time; keeps the whole function consistent with the lookup above. */
    fac = (framenumber - node->custom1) / (float)(node->custom2 - node->custom1);
  }
  /* Remap the linear progress through the node's curve before clamping. */
  curvemapping_initialize((CurveMapping *)node->storage);
  fac = curvemapping_evaluateF((CurveMapping *)node->storage, 0, fac);
  operation->setValue(clamp_f(fac, 0.0f, 1.0f));
  converter.addOperation(operation);
  converter.mapOutputSocket(getOutputSocket(0), operation->getOutputSocket());
}
示例2: ScaleOperation
void Stabilize2dNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
  /* Builds the 2D-stabilization chain: scale -> rotate -> translate, with the
   * per-frame scale/angle/offset values read from the movie clip's tracking
   * data, and a final sampler pass applied to the result. */
  NodeInput *image_input = this->getInputSocket(0);
  MovieClip *movie_clip = (MovieClip *)getbNode()->id;
  const int frame = context.getFramenumber();

  ScaleOperation *scale_op = new ScaleOperation();
  scale_op->setSampler((PixelSampler)this->getbNode()->custom1);

  RotateOperation *rotate_op = new RotateOperation();
  /* Attribute values are already in radians; no degree conversion needed. */
  rotate_op->setDoDegree2RadConversion(false);

  TranslateOperation *translate_op = new TranslateOperation();

  SetSamplerOperation *sampler_op = new SetSamplerOperation();
  sampler_op->setSampler((PixelSampler)this->getbNode()->custom1);

  /* One attribute-reader operation per stabilization parameter. */
  MovieClipAttributeOperation *scale_attr = new MovieClipAttributeOperation();
  scale_attr->setAttribute(MCA_SCALE);
  scale_attr->setFramenumber(frame);
  scale_attr->setMovieClip(movie_clip);

  MovieClipAttributeOperation *angle_attr = new MovieClipAttributeOperation();
  angle_attr->setAttribute(MCA_ANGLE);
  angle_attr->setFramenumber(frame);
  angle_attr->setMovieClip(movie_clip);

  MovieClipAttributeOperation *x_attr = new MovieClipAttributeOperation();
  x_attr->setAttribute(MCA_X);
  x_attr->setFramenumber(frame);
  x_attr->setMovieClip(movie_clip);

  MovieClipAttributeOperation *y_attr = new MovieClipAttributeOperation();
  y_attr->setAttribute(MCA_Y);
  y_attr->setFramenumber(frame);
  y_attr->setMovieClip(movie_clip);

  converter.addOperation(scale_attr);
  converter.addOperation(angle_attr);
  converter.addOperation(x_attr);
  converter.addOperation(y_attr);
  converter.addOperation(scale_op);
  converter.addOperation(translate_op);
  converter.addOperation(rotate_op);
  converter.addOperation(sampler_op);

  /* Wire: input -> scale (uniform: same factor on X and Y) -> rotate ->
   * translate -> sampler -> output. */
  converter.mapInputSocket(image_input, scale_op->getInputSocket(0));
  converter.addLink(scale_attr->getOutputSocket(), scale_op->getInputSocket(1));
  converter.addLink(scale_attr->getOutputSocket(), scale_op->getInputSocket(2));
  converter.addLink(scale_op->getOutputSocket(), rotate_op->getInputSocket(0));
  converter.addLink(angle_attr->getOutputSocket(), rotate_op->getInputSocket(1));
  converter.addLink(rotate_op->getOutputSocket(), translate_op->getInputSocket(0));
  converter.addLink(x_attr->getOutputSocket(), translate_op->getInputSocket(1));
  converter.addLink(y_attr->getOutputSocket(), translate_op->getInputSocket(2));
  converter.addLink(translate_op->getOutputSocket(), sampler_op->getInputSocket(0));
  converter.mapOutputSocket(getOutputSocket(), sampler_op->getOutputSocket());
}
示例3: convertToOperations
void MovieDistortionNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
  /* Maps the editor node onto a single MovieDistortionOperation; custom1 == 1
   * selects the distort direction (otherwise undistort). */
  bNode *editor_node = this->getbNode();
  const bool distort = (editor_node->custom1 == 1);

  MovieDistortionOperation *distortion_op = new MovieDistortionOperation(distort);
  distortion_op->setMovieClip((MovieClip *)editor_node->id);
  distortion_op->setFramenumber(context.getFramenumber());

  converter.addOperation(distortion_op);
  converter.mapInputSocket(this->getInputSocket(0), distortion_op->getInputSocket(0));
  converter.mapOutputSocket(this->getOutputSocket(0), distortion_op->getOutputSocket(0));
}
示例4: convertToOperations
void KeyingScreenNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
  /* Builds the keying-screen output from the movie clip's tracking object at
   * the current compositor frame. */
  bNode *editor_node = this->getbNode();
  NodeKeyingScreenData *screen_data = (NodeKeyingScreenData *)editor_node->storage;

  /* The screen output is always connected. */
  KeyingScreenOperation *screen_op = new KeyingScreenOperation();
  screen_op->setMovieClip((MovieClip *)editor_node->id);
  screen_op->setTrackingObject(screen_data->tracking_object);
  screen_op->setFramenumber(context.getFramenumber());

  converter.addOperation(screen_op);
  converter.mapOutputSocket(this->getOutputSocket(0), screen_op->getOutputSocket());
}
示例5: convertToOperations
void TrackPositionNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
  /* Outputs the X (socket 0) and Y (socket 1) coordinates of a tracking
   * marker, each computed by its own TrackPositionOperation. */
  bNode *editor_node = this->getbNode();
  MovieClip *movie_clip = (MovieClip *)editor_node->id;
  NodeTrackPosData *track_data = (NodeTrackPosData *)editor_node->storage;

  /* Absolute-frame mode reads the frame from custom2; otherwise use the
   * current compositor frame. */
  int frame;
  if (editor_node->custom1 == CMP_TRACKPOS_ABSOLUTE_FRAME) {
    frame = editor_node->custom2;
  }
  else {
    frame = context.getFramenumber();
  }

  /* The two operations are identical apart from the axis they sample. */
  TrackPositionOperation *axis_ops[2];
  for (int axis = 0; axis < 2; axis++) {
    TrackPositionOperation *position_op = new TrackPositionOperation();
    position_op->setMovieClip(movie_clip);
    position_op->setTrackingObject(track_data->tracking_object);
    position_op->setTrackName(track_data->track_name);
    position_op->setFramenumber(frame);
    position_op->setAxis(axis);
    position_op->setPosition(editor_node->custom1);
    position_op->setRelativeFrame(editor_node->custom2);
    converter.addOperation(position_op);
    axis_ops[axis] = position_op;
  }

  converter.mapOutputSocket(this->getOutputSocket(0), axis_ops[0]->getOutputSocket());
  converter.mapOutputSocket(this->getOutputSocket(1), axis_ops[1]->getOutputSocket());
}
示例6: convertToOperations
void MovieClipNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
  /* Converts the Movie Clip node into its operations: the clip image (socket
   * 0), its alpha (socket 1), and the clip's 2D stabilization data as value
   * outputs (offset X/Y, scale, angle on sockets 2-5). */
  NodeOutput *outputMovieClip = this->getOutputSocket(0);
  NodeOutput *alphaMovieClip = this->getOutputSocket(1);
  NodeOutput *offsetXMovieClip = this->getOutputSocket(2);
  NodeOutput *offsetYMovieClip = this->getOutputSocket(3);
  NodeOutput *scaleMovieClip = this->getOutputSocket(4);
  NodeOutput *angleMovieClip = this->getOutputSocket(5);
  bNode *editorNode = this->getbNode();
  MovieClip *movieClip = (MovieClip *)editorNode->id;
  MovieClipUser *movieClipUser = (MovieClipUser *)editorNode->storage;
  /* Cache frames only for interactive use, not during final rendering. */
  bool cacheFrame = !context.isRendering();

  ImBuf *ibuf = NULL;
  if (movieClip) {
    if (cacheFrame) {
      ibuf = BKE_movieclip_get_ibuf(movieClip, movieClipUser);
    }
    else {
      ibuf = BKE_movieclip_get_ibuf_flag(movieClip, movieClipUser, movieClip->flag, MOVIECLIP_CACHE_SKIP);
    }
  }

  /* always connect the output image */
  MovieClipOperation *operation = new MovieClipOperation();
  operation->setMovieClip(movieClip);
  operation->setMovieClipUser(movieClipUser);
  operation->setFramenumber(context.getFramenumber());
  operation->setCacheFrame(cacheFrame);
  converter.addOperation(operation);
  converter.mapOutputSocket(outputMovieClip, operation->getOutputSocket());
  converter.addPreview(operation->getOutputSocket());

  MovieClipAlphaOperation *alphaOperation = new MovieClipAlphaOperation();
  alphaOperation->setMovieClip(movieClip);
  alphaOperation->setMovieClipUser(movieClipUser);
  alphaOperation->setFramenumber(context.getFramenumber());
  alphaOperation->setCacheFrame(cacheFrame);
  converter.addOperation(alphaOperation);
  converter.mapOutputSocket(alphaMovieClip, alphaOperation->getOutputSocket());

  /* Stabilization outputs default to the identity transform. */
  float loc[2] = {0.0f, 0.0f};
  float scale = 1.0f;
  float angle = 0.0f;
  if (ibuf) {
    /* ibuf can only be non-NULL when movieClip is valid, so the tracking data
     * is accessed here instead of unconditionally above, which computed
     * `&movieClip->tracking.stabilization` on a possibly NULL movieClip. */
    MovieTrackingStabilization *stab = &movieClip->tracking.stabilization;
    if (stab->flag & TRACKING_2D_STABILIZATION) {
      int clip_framenr = BKE_movieclip_remap_scene_to_clip_frame(movieClip, context.getFramenumber());
      BKE_tracking_stabilization_data_get(&movieClip->tracking, clip_framenr, ibuf->x, ibuf->y, loc, &scale, &angle);
    }
  }
  converter.addOutputValue(offsetXMovieClip, loc[0]);
  converter.addOutputValue(offsetYMovieClip, loc[1]);
  converter.addOutputValue(scaleMovieClip, scale);
  converter.addOutputValue(angleMovieClip, angle);

  if (ibuf) {
    IMB_freeImBuf(ibuf);
  }
}
示例7: convertToOperations
/* Converts the Image node's multilayer outputs into operations.
 * NOTE(review): this snippet is truncated by the example aggregator — the
 * function's closing braces and the non-multilayer fall-through are missing,
 * and `outputImage`, `outputStraightAlpha`, `ibuf` and `is_multilayer_ok`
 * are consumed in code past the visible end. Code left byte-identical. */
void ImageNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
/// Image output
NodeOutput *outputImage = this->getOutputSocket(0);
bNode *editorNode = this->getbNode();
Image *image = (Image *)editorNode->id;
ImageUser *imageuser = (ImageUser *)editorNode->storage;
int framenumber = context.getFramenumber();
int numberOfOutputs = this->getNumberOfOutputSockets();
bool outputStraightAlpha = (editorNode->custom1 & CMP_NODE_IMAGE_USE_STRAIGHT_OUTPUT) != 0;
/* Resolve the image user's frame for sequences before acquiring buffers. */
BKE_image_user_frame_calc(imageuser, context.getFramenumber(), 0);
/* force a load, we assume iuser index will be set OK anyway */
if (image && image->type == IMA_TYPE_MULTILAYER) {
bool is_multilayer_ok = false;
ImBuf *ibuf = BKE_image_acquire_ibuf(image, imageuser, NULL);
if (image->rr) {
/* Find the render layer selected on the node (imageuser->layer index). */
RenderLayer *rl = (RenderLayer *)BLI_findlink(&image->rr->layers, imageuser->layer);
if (rl) {
NodeOutput *socket;
int index;
is_multilayer_ok = true;
/* One operation per output socket, matched to a file pass by name. */
for (index = 0; index < numberOfOutputs; index++) {
NodeOperation *operation = NULL;
socket = this->getOutputSocket(index);
bNodeSocket *bnodeSocket = socket->getbNodeSocket();
/* Passes in the file can differ from passes stored in sockets (#36755).
* Look up the correct file pass using the socket identifier instead.
*/
#if 0
NodeImageLayer *storage = (NodeImageLayer *)bnodeSocket->storage;*/
int passindex = storage->pass_index;*/
RenderPass *rpass = (RenderPass *)BLI_findlink(&rl->passes, passindex);
#endif
/* Linear scan of the layer's passes for a name matching the socket
 * identifier; passindex tracks the position for imageuser->pass. */
int passindex;
RenderPass *rpass;
for (rpass = (RenderPass *)rl->passes.first, passindex = 0; rpass; rpass = rpass->next, ++passindex)
if (STREQ(rpass->name, bnodeSocket->identifier))
break;
if (rpass) {
imageuser->pass = passindex;
/* Channel count decides the operation's data type. */
switch (rpass->channels) {
case 1:
operation = doMultilayerCheck(converter, rl, image, imageuser, framenumber, index, passindex, COM_DT_VALUE);
break;
/* using image operations for both 3 and 4 channels (RGB and RGBA respectively) */
/* XXX any way to detect actual vector images? */
case 3:
operation = doMultilayerCheck(converter, rl, image, imageuser, framenumber, index, passindex, COM_DT_VECTOR);
break;
case 4:
operation = doMultilayerCheck(converter, rl, image, imageuser, framenumber, index, passindex, COM_DT_COLOR);
break;
default:
/* dummy operation is added below */
break;
}
/* Preview is driven by the first output only. */
if (index == 0 && operation) {
converter.addPreview(operation->getOutputSocket());
}
}
/* incase we can't load the layer */
if (operation == NULL)
converter.setInvalidOutput(getOutputSocket(index));
}
}
}
示例8: convertToOperations
/* Multiview-aware variant of the Image node conversion.
 * NOTE(review): this snippet is truncated by the example aggregator — the
 * function's closing braces and the non-multilayer fall-through are missing,
 * and `outputImage`, `outputStraightAlpha`, `ibuf` and `is_multilayer_ok`
 * are consumed in code past the visible end. Code left byte-identical. */
void ImageNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
/// Image output
NodeOutput *outputImage = this->getOutputSocket(0);
bNode *editorNode = this->getbNode();
Image *image = (Image *)editorNode->id;
ImageUser *imageuser = (ImageUser *)editorNode->storage;
int framenumber = context.getFramenumber();
int numberOfOutputs = this->getNumberOfOutputSockets();
bool outputStraightAlpha = (editorNode->custom1 & CMP_NODE_IMAGE_USE_STRAIGHT_OUTPUT) != 0;
/* Resolve the image user's frame for sequences before acquiring buffers. */
BKE_image_user_frame_calc(imageuser, context.getFramenumber(), 0);
/* force a load, we assume iuser index will be set OK anyway */
if (image && image->type == IMA_TYPE_MULTILAYER) {
bool is_multilayer_ok = false;
ImBuf *ibuf = BKE_image_acquire_ibuf(image, imageuser, NULL);
if (image->rr) {
/* Find the render layer selected on the node (imageuser->layer index). */
RenderLayer *rl = (RenderLayer *)BLI_findlink(&image->rr->layers, imageuser->layer);
if (rl) {
NodeOutput *socket;
int index;
is_multilayer_ok = true;
/* One operation per output socket, matched to a file pass by name. */
for (index = 0; index < numberOfOutputs; index++) {
NodeOperation *operation = NULL;
socket = this->getOutputSocket(index);
bNodeSocket *bnodeSocket = socket->getbNodeSocket();
/* Match the socket to a pass by its internal name (see #36755 note below). */
RenderPass *rpass = (RenderPass *)BLI_findstring(&rl->passes, bnodeSocket->identifier, offsetof(RenderPass, internal_name));
int view = 0;
/* Passes in the file can differ from passes stored in sockets (#36755).
* Look up the correct file pass using the socket identifier instead.
*/
#if 0
NodeImageLayer *storage = (NodeImageLayer *)bnodeSocket->storage;*/
int passindex = storage->pass_index;*/
RenderPass *rpass = (RenderPass *)BLI_findlink(&rl->passes, passindex);
#endif
/* returns the image view to use for the current active view */
if (BLI_listbase_count_ex(&image->rr->views, 2) > 1) {
const int view_image = imageuser->view;
const bool is_allview = (view_image == 0); /* if view selected == All (0) */
if (is_allview) {
/* heuristic to match image name with scene names
* check if the view name exists in the image */
view = BLI_findstringindex(&image->rr->views, context.getViewName(), offsetof(RenderView, name));
if (view == -1) view = 0;
}
else {
/* view 0 in the node means "All"; stored views are offset by one. */
view = view_image - 1;
}
}
if (rpass) {
/* Channel count decides the operation's data type. */
switch (rpass->channels) {
case 1:
operation = doMultilayerCheck(converter, rl, image, imageuser, framenumber, index,
rpass->passtype, view, COM_DT_VALUE);
break;
/* using image operations for both 3 and 4 channels (RGB and RGBA respectively) */
/* XXX any way to detect actual vector images? */
case 3:
operation = doMultilayerCheck(converter, rl, image, imageuser, framenumber, index,
rpass->passtype, view, COM_DT_VECTOR);
break;
case 4:
operation = doMultilayerCheck(converter, rl, image, imageuser, framenumber, index,
rpass->passtype, view, COM_DT_COLOR);
break;
default:
/* dummy operation is added below */
break;
}
/* Preview is driven by the first output only. */
if (index == 0 && operation) {
converter.addPreview(operation->getOutputSocket());
}
/* The combined pass also feeds the following Alpha socket via a
 * channel-separate operation; index is advanced past that socket. */
if (rpass->passtype == SCE_PASS_COMBINED) {
BLI_assert(operation != NULL);
BLI_assert(index < numberOfOutputs - 1);
NodeOutput *outputSocket = this->getOutputSocket(index + 1);
SeparateChannelOperation *separate_operation;
separate_operation = new SeparateChannelOperation();
separate_operation->setChannel(3);
converter.addOperation(separate_operation);
converter.addLink(operation->getOutputSocket(), separate_operation->getInputSocket(0));
converter.mapOutputSocket(outputSocket, separate_operation->getOutputSocket());
index++;
}
}
/* incase we can't load the layer */
if (operation == NULL)
converter.setInvalidOutput(getOutputSocket(index));
}
}
}
示例9: convertToOperations
void TrackPositionNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
  /* Outputs a tracking marker's X (socket 0) and Y (socket 1) positions, plus
   * a speed output (socket 2) built from the marker's motion one frame before
   * and after the evaluated frame, packed into four channels. */
  bNode *editor_node = this->getbNode();
  MovieClip *movie_clip = (MovieClip *)editor_node->id;
  NodeTrackPosData *track_data = (NodeTrackPosData *)editor_node->storage;

  /* Absolute-frame mode reads the frame from custom2; otherwise use the
   * current compositor frame. */
  int frame;
  if (editor_node->custom1 == CMP_TRACKPOS_ABSOLUTE_FRAME) {
    frame = editor_node->custom2;
  }
  else {
    frame = context.getFramenumber();
  }

  /* Position outputs: identical setup for both axes. */
  for (int axis = 0; axis < 2; axis++) {
    TrackPositionOperation *position_op = new TrackPositionOperation();
    position_op->setMovieClip(movie_clip);
    position_op->setTrackingObject(track_data->tracking_object);
    position_op->setTrackName(track_data->track_name);
    position_op->setFramenumber(frame);
    position_op->setAxis(axis);
    position_op->setPosition(editor_node->custom1);
    position_op->setRelativeFrame(editor_node->custom2);
    converter.addOperation(position_op);
    converter.mapOutputSocket(this->getOutputSocket(axis), position_op->getOutputSocket());
  }

  /* Speed output: per-axis motion relative to the previous (-1) and next (+1)
   * frames, combined into one four-channel value. */
  TrackPositionOperation *motion_pre_x = create_motion_operation(converter, movie_clip, track_data, 0, frame, -1);
  TrackPositionOperation *motion_pre_y = create_motion_operation(converter, movie_clip, track_data, 1, frame, -1);
  TrackPositionOperation *motion_post_x = create_motion_operation(converter, movie_clip, track_data, 0, frame, 1);
  TrackPositionOperation *motion_post_y = create_motion_operation(converter, movie_clip, track_data, 1, frame, 1);

  CombineChannelsOperation *combine_op = new CombineChannelsOperation();
  converter.addOperation(combine_op);
  converter.addLink(motion_pre_x->getOutputSocket(), combine_op->getInputSocket(0));
  converter.addLink(motion_pre_y->getOutputSocket(), combine_op->getInputSocket(1));
  converter.addLink(motion_post_x->getOutputSocket(), combine_op->getInputSocket(2));
  converter.addLink(motion_post_y->getOutputSocket(), combine_op->getInputSocket(3));
  converter.mapOutputSocket(this->getOutputSocket(2), combine_op->getOutputSocket());
}