This article collects typical usage examples of the C++ method TComPicYuv::destroy. If you are unsure what TComPicYuv::destroy does, how to call it, or where it is used, the hand-picked examples below should help. You can also explore further usage examples of its containing class, TComPicYuv.
Five code examples of TComPicYuv::destroy are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
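Before the examples, here is a minimal sketch of the lifecycle they all follow. In the HM/HTM reference software these examples come from, TComPicYuv::create() allocates the internal picture planes and destroy() frees them; the destructor is not guaranteed to do this across versions, which is why every example calls destroy() explicitly before delete. The arguments below are illustrative, and the create() signature varies between versions (compare Examples 1 and 5):

TComPicYuv* pcPicYuv = new TComPicYuv;
pcPicYuv->create( 1920, 1080, 1, 1, 1 ); // allocate luma/chroma planes (illustrative arguments)
// ... read into or render from the buffer ...
pcPicYuv->destroy();                     // free the internal planes
delete pcPicYuv;                         // then release the object itself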
Example 1: renderUsedPelsMap
//......... some code omitted here .........
      pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
      apcPicYuvLastBaseDepth.push_back( pcNewDepthPic );
    }
  }

  // Create Buffer for synthesized View
  TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
  pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

  Bool bAnyEOS = false;
  Int iNumOfRenderedFrames = 0;
  Int iFrame = 0;

  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
  {
    if ( iFrame >= m_iFrameSkip )
    {
      // read in depth and video
      for ( Int iBaseViewIdx = 0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
      {
        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad );
        apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad );
        apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();

        if ( m_bTempDepthFilter && ( iFrame >= m_iFrameSkip ) )
        {
          m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip ) );
        }
      }
    }
    else
    {
      std::cout << "Skipping Frame " << iFrame << std::endl;
      iFrame++;
      continue;
    }

    m_cCameraData.update( (UInt)( iFrame - m_iFrameSkip ) );

    for ( Int iViewIdx = 1; iViewIdx < m_iNumberOfInputViews; iViewIdx++ )
    {
      std::cout << "Rendering UsedPelsMap for Frame " << iFrame << " of View " << (Double) m_cCameraData.getBaseViewNumbers()[iViewIdx] << std::endl;

      Int iViewSIdx      = m_cCameraData.getBaseId2SortedId()[iViewIdx];
      Int iFirstViewSIdx = m_cCameraData.getBaseId2SortedId()[0];
      AOT( iViewSIdx == iFirstViewSIdx );
      Bool bFirstIsLeft = ( iFirstViewSIdx < iViewSIdx );

      m_pcRenTop->setShiftLUTs(
        m_cCameraData.getBaseViewShiftLUTD()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTD()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        -1
      );

      m_pcRenTop->getUsedSamplesMap( apcPicYuvBaseDepth[0], pcPicYuvSynthOut, bFirstIsLeft );

      // Write Output
      m_apcTVideoIOYuvSynthOutput[iViewIdx - 1]->write( pcPicYuvSynthOut, 0, 0, 0 );
    }

    iFrame++;
    iNumOfRenderedFrames++;
  }

  // Delete Buffers
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    apcPicYuvBaseVideo[uiBaseView]->destroy();
    delete apcPicYuvBaseVideo[uiBaseView];

    apcPicYuvBaseDepth[uiBaseView]->destroy();
    delete apcPicYuvBaseDepth[uiBaseView];

    // Temporal Filter
    if ( m_bTempDepthFilter )
    {
      apcPicYuvLastBaseVideo[uiBaseView]->destroy();
      delete apcPicYuvLastBaseVideo[uiBaseView];

      apcPicYuvLastBaseDepth[uiBaseView]->destroy();
      delete apcPicYuvLastBaseDepth[uiBaseView];
    }
  }

  pcPicYuvSynthOut->destroy();
  delete pcPicYuvSynthOut;

  xDestroyLib();
}
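Every example on this page repeats the destroy-then-delete pairing by hand, which is easy to leak on an early return. As a sketch (this helper is not part of HTM, and it assumes a C++11 toolchain, which the original HTM codebase predates), a std::unique_ptr with a custom deleter can enforce the pairing automatically:

#include <memory>

// Hypothetical helper, not part of HTM: a deleter that calls destroy() before delete.
struct TComPicYuvDeleter
{
  void operator()( TComPicYuv* pcPic ) const
  {
    if ( pcPic != NULL )
    {
      pcPic->destroy(); // free the internal planes first
      delete pcPic;     // then the object itself
    }
  }
};
typedef std::unique_ptr<TComPicYuv, TComPicYuvDeleter> TComPicYuvPtr;

// Usage sketch:
//   TComPicYuvPtr pcPicYuvSynthOut( new TComPicYuv );
//   pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
//   ... destroy() and delete now run automatically when the pointer goes out of scope.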
Example 2: xRenderModelFromNums
//......... some code omitted here .........
          cCurModel.setBaseView( iBaseViewSIdx, apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], NULL, NULL );
        }
      }
    }
    else
    {
      iFrame++;
      continue;
    }

    m_cCameraData.update( (UInt)( iFrame - m_iFrameSkip ) );

    for ( Int iSynthViewIdx = 0; iSynthViewIdx < m_iNumberOfOutputViews; iSynthViewIdx++ )
    {
      Int iLeftBaseViewIdx  = -1;
      Int iRightBaseViewIdx = -1;
      Bool bIsBaseView = false;

      Int iRelDistToLeft;
      Bool bHasLRView = m_cCameraData.getLeftRightBaseView( iSynthViewIdx, iLeftBaseViewIdx, iRightBaseViewIdx, iRelDistToLeft, bIsBaseView );
      Bool bHasLView = ( iLeftBaseViewIdx != -1 );
      Bool bHasRView = ( iRightBaseViewIdx != -1 );

      switch ( m_iRenderDirection )
      {
      /// INTERPOLATION
      case 0:
        assert( bHasLRView || bIsBaseView );
        if ( !bHasLRView && bIsBaseView ) // View to render is BaseView
        {
          std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          apcPicYuvBaseVideo[iLeftBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
        }
        else // Render
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          cCurModel.setSingleModel( iSynthViewIdx,
                                    m_cCameraData.getSynthViewShiftLUTI()[iLeftBaseViewIdx ][iSynthViewIdx],
                                    m_cCameraData.getBaseViewShiftLUTI ()[iLeftBaseViewIdx ][iRightBaseViewIdx],
                                    m_cCameraData.getSynthViewShiftLUTI()[iRightBaseViewIdx][iSynthViewIdx],
                                    m_cCameraData.getBaseViewShiftLUTI ()[iRightBaseViewIdx][iLeftBaseViewIdx],
                                    iRelDistToLeft,
                                    NULL );
          cCurModel.getSynthVideo( iSynthViewIdx, VIEWPOS_MERGED, pcPicYuvSynthOut );
        }
        break;
      /// EXTRAPOLATION FROM LEFT
      case 1:
        if ( !bHasLView ) // View to render is BaseView
        {
          std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          apcPicYuvBaseVideo[iLeftBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
        }
        else // Render
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          cCurModel.setSingleModel( iSynthViewIdx, m_cCameraData.getSynthViewShiftLUTI()[iLeftBaseViewIdx][iSynthViewIdx], NULL, NULL, NULL, -1, NULL );
          cCurModel.getSynthVideo( iSynthViewIdx, VIEWPOS_LEFT, pcPicYuvSynthOut );
        }
        break;
      /// EXTRAPOLATION FROM RIGHT
      case 2: // extrapolation from right
        if ( !bHasRView ) // View to render is BaseView
        {
          std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          apcPicYuvBaseVideo[iRightBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
        }
        else // Render
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          cCurModel.setSingleModel( iSynthViewIdx, NULL, NULL, m_cCameraData.getSynthViewShiftLUTI()[iRightBaseViewIdx][iSynthViewIdx], NULL, -1, NULL );
          cCurModel.getSynthVideo( iSynthViewIdx, VIEWPOS_RIGHT, pcPicYuvSynthOut );
        }
        break;
      }

      // Write Output
      m_apcTVideoIOYuvSynthOutput[m_bSweep ? 0 : iSynthViewIdx]->write( pcPicYuvSynthOut, 0, 0, 0, 0 );
    }

    iFrame++;
    iNumOfRenderedFrames++;
  }

  // Delete Buffers
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    apcPicYuvBaseVideo[uiBaseView]->destroy();
    delete apcPicYuvBaseVideo[uiBaseView];

    apcPicYuvBaseDepth[uiBaseView]->destroy();
    delete apcPicYuvBaseDepth[uiBaseView];
  }

  pcPicYuvSynthOut->destroy();
  delete pcPicYuvSynthOut;

  xDestroyLib();
}
Example 3: render
//......... some code omitted here .........
      /// EXTRAPOLATION FROM LEFT
      case 1:
        if ( !bHasLView ) // View to render is BaseView
        {
          bRender = false;
        }
        if ( bIsBaseView )
        {
          AOF( iLeftBaseViewIdx == iRightBaseViewIdx );
          Int iSortedBaseViewIdx = m_cCameraData.getBaseId2SortedId()[iLeftBaseViewIdx];
          if ( iSortedBaseViewIdx - 1 >= 0 )
          {
            iLeftBaseViewIdx = m_cCameraData.getBaseSortedId2Id()[iSortedBaseViewIdx - 1];
          }
          else
          {
            std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
            apcPicYuvBaseVideo[iLeftBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
            bRender = false;
          }
        }

        if ( bRender )
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          m_pcRenTop->setShiftLUTs( m_cCameraData.getSynthViewShiftLUTD()[iLeftBaseViewIdx][iSynthViewIdx],
                                    m_cCameraData.getSynthViewShiftLUTI()[iLeftBaseViewIdx][iSynthViewIdx], NULL, NULL, NULL, NULL, -1 );
          m_pcRenTop->extrapolateView( apcPicYuvBaseVideo[iLeftBaseViewIdx], apcPicYuvBaseDepth[iLeftBaseViewIdx], pcPicYuvSynthOut, true );
        }
        break;
      /// EXTRAPOLATION FROM RIGHT
      case 2: // extrapolation from right
        if ( !bHasRView ) // View to render is BaseView
        {
          bRender = false;
        }
        if ( bIsBaseView )
        {
          AOF( iLeftBaseViewIdx == iRightBaseViewIdx );
          Int iSortedBaseViewIdx = m_cCameraData.getBaseId2SortedId()[iLeftBaseViewIdx];
          if ( iSortedBaseViewIdx + 1 < m_iNumberOfInputViews )
          {
            iRightBaseViewIdx = m_cCameraData.getBaseSortedId2Id()[iSortedBaseViewIdx + 1];
          }
          else
          {
            std::cout << "Copied Frame " << iFrame << " of BaseView " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
            apcPicYuvBaseVideo[iLeftBaseViewIdx]->copyToPic( pcPicYuvSynthOut ); // Copy Original
            bRender = false;
          }
        }

        if ( bRender )
        {
          std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;
          m_pcRenTop->setShiftLUTs( NULL, NULL, NULL, m_cCameraData.getSynthViewShiftLUTD()[iRightBaseViewIdx][iSynthViewIdx],
                                    m_cCameraData.getSynthViewShiftLUTI()[iRightBaseViewIdx][iSynthViewIdx], NULL, iRelDistToLeft );
          m_pcRenTop->extrapolateView( apcPicYuvBaseVideo[iRightBaseViewIdx], apcPicYuvBaseDepth[iRightBaseViewIdx], pcPicYuvSynthOut, false );
        }
        break;
      }

      // Write Output
      m_apcTVideoIOYuvSynthOutput[m_bSweep ? 0 : iSynthViewIdx]->write( pcPicYuvSynthOut, 0, 0, 0, 0 );
    }

    iFrame++;
    iNumOfRenderedFrames++;
  }

  // Delete Buffers
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    apcPicYuvBaseVideo[uiBaseView]->destroy();
    delete apcPicYuvBaseVideo[uiBaseView];

    apcPicYuvBaseDepth[uiBaseView]->destroy();
    delete apcPicYuvBaseDepth[uiBaseView];

    // Temporal Filter
    if ( m_bTempDepthFilter )
    {
      apcPicYuvLastBaseVideo[uiBaseView]->destroy();
      delete apcPicYuvLastBaseVideo[uiBaseView];

      apcPicYuvLastBaseDepth[uiBaseView]->destroy();
      delete apcPicYuvLastBaseDepth[uiBaseView];
    }
  }

  pcPicYuvSynthOut->destroy();
  delete pcPicYuvSynthOut;

  xDestroyLib();
}
Example 4: xRenderModelFromString
//......... some code omitted here .........
    m_cCameraData.update( (UInt)( iFrame - m_iFrameSkip ) );

    for ( Int iBaseViewIdx = 0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    {
      // setup virtual views
      Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx];
      cCurModel.setErrorMode( iBaseViewSIdx, 1, 0 );
      Int iNumOfSV = m_cRenModStrParser.getNumOfModelsForView( iBaseViewSIdx, 1 );

      for ( Int iCurView = 0; iCurView < iNumOfSV; iCurView++ )
      {
        Int iOrgRefBaseViewSIdx;
        Int iLeftBaseViewSIdx;
        Int iRightBaseViewSIdx;
        Int iSynthViewRelNum;
        Int iModelNum;
        Int iBlendMode;
        m_cRenModStrParser.getSingleModelData( iBaseViewSIdx, 1, iCurView, iModelNum, iBlendMode, iLeftBaseViewSIdx, iRightBaseViewSIdx, iOrgRefBaseViewSIdx, iSynthViewRelNum );

        Int iLeftBaseViewIdx  = -1;
        Int iRightBaseViewIdx = -1;

        TComPicYuv* pcPicYuvOrgRef = NULL;
        Int** ppiShiftLUTLeft  = NULL;
        Int** ppiShiftLUTRight = NULL;
        Int** ppiBaseShiftLUTLeft  = NULL;
        Int** ppiBaseShiftLUTRight = NULL;
        Int iDistToLeft = -1;

        Int iSynthViewIdx = m_cCameraData.synthRelNum2Idx( iSynthViewRelNum );

        if ( iLeftBaseViewSIdx != -1 )
        {
          iLeftBaseViewIdx = m_cCameraData.getBaseSortedId2Id()[iLeftBaseViewSIdx];
          ppiShiftLUTLeft  = m_cCameraData.getSynthViewShiftLUTI()[iLeftBaseViewIdx][iSynthViewIdx];
        }
        if ( iRightBaseViewSIdx != -1 )
        {
          iRightBaseViewIdx = m_cCameraData.getBaseSortedId2Id()[iRightBaseViewSIdx];
          ppiShiftLUTRight  = m_cCameraData.getSynthViewShiftLUTI()[iRightBaseViewIdx][iSynthViewIdx];
        }
        if ( iRightBaseViewSIdx != -1 && iLeftBaseViewSIdx != -1 )
        {
          ppiBaseShiftLUTLeft  = m_cCameraData.getBaseViewShiftLUTI()[iLeftBaseViewIdx ][iRightBaseViewIdx];
          ppiBaseShiftLUTRight = m_cCameraData.getBaseViewShiftLUTI()[iRightBaseViewIdx][iLeftBaseViewIdx ];
          iDistToLeft = m_cCameraData.getRelDistLeft( iSynthViewIdx, iLeftBaseViewIdx, iRightBaseViewIdx );
        }

        std::cout << "Rendering Frame " << iFrame << " of View " << (Double) m_cCameraData.getSynthViewNumbers()[iSynthViewIdx] / VIEW_NUM_PREC << std::endl;

        cCurModel.setSingleModel( iModelNum, ppiShiftLUTLeft, ppiBaseShiftLUTLeft, ppiShiftLUTRight, ppiBaseShiftLUTRight, iDistToLeft, pcPicYuvOrgRef );

        Int iViewPos;
        if ( iLeftBaseViewSIdx != -1 && iRightBaseViewSIdx != -1 )
        {
          iViewPos = VIEWPOS_MERGED;
        }
        else if ( iLeftBaseViewSIdx != -1 )
        {
          iViewPos = VIEWPOS_LEFT;
        }
        else if ( iRightBaseViewSIdx != -1 )
        {
          iViewPos = VIEWPOS_RIGHT;
        }
        else
        {
          AOT( true );
        }

        cCurModel.getSynthVideo( iModelNum, iViewPos, pcPicYuvSynthOut );

        // Write Output
        m_apcTVideoIOYuvSynthOutput[m_bSweep ? 0 : iModelNum]->write( pcPicYuvSynthOut, 0, 0, 0, 0 );
      }
    }

    iFrame++;
    iNumOfRenderedFrames++;
  }

  // Delete Buffers
  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    apcPicYuvBaseVideo[uiBaseView]->destroy();
    delete apcPicYuvBaseVideo[uiBaseView];

    apcPicYuvBaseDepth[uiBaseView]->destroy();
    delete apcPicYuvBaseDepth[uiBaseView];
  }

  pcPicYuvSynthOut->destroy();
  delete pcPicYuvSynthOut;

  xDestroyLib();
}
Example 5: main
//......... some code omitted here .........
  opts.addOptions()
  ("help",           do_help,         false,       "this help text")
  ("InputFile,i",    filename_in,     string(""),  "input file to convert")
  ("OutputFile,o",   filename_out,    string(""),  "output file")
  ("SourceWidth",    width,           0u,          "source picture width")
  ("SourceHeight",   height,          0u,          "source picture height")
  ("InputBitDepth",  bitdepth_in,     8u,          "bit-depth of input file")
  ("OutputBitDepth", bitdepth_out,    8u,          "bit-depth of output file")
  ("ChromaFormat",   chromaFormatRaw, 420u,        "chroma format. 400, 420, 422 or 444 only")
  ("NumFrames",      num_frames,      0xffffffffu, "number of frames to process")
  ("FrameSkip,-fs",  num_frames_skip, 0u,          "Number of frames to skip at start of input YUV")
  ;

  po::setDefaults(opts);
  po::scanArgv(opts, argc, argv);

  if (argc == 1 || do_help)
  {
    /* argc == 1: no options have been specified */
    po::doHelp(cout, opts);
    return EXIT_FAILURE;
  }

  ChromaFormat chromaFormatIDC = CHROMA_420;
  switch (chromaFormatRaw)
  {
  case 400:
    chromaFormatIDC = CHROMA_400;
    break;
  case 420:
    chromaFormatIDC = CHROMA_420;
    break;
  case 422:
    chromaFormatIDC = CHROMA_422;
    break;
  case 444:
    chromaFormatIDC = CHROMA_444;
    break;
  default:
    fprintf(stderr, "Bad chroma format string\n");
    return EXIT_FAILURE;
  }

  TVideoIOYuv input;
  TVideoIOYuv output;

  Int inputBitDepths [MAX_NUM_CHANNEL_TYPE];
  Int outputBitDepths[MAX_NUM_CHANNEL_TYPE];
  for (UInt channelTypeIndex = 0; channelTypeIndex < MAX_NUM_CHANNEL_TYPE; channelTypeIndex++)
  {
    inputBitDepths [channelTypeIndex] = bitdepth_in;
    outputBitDepths[channelTypeIndex] = bitdepth_out;
  }

  input.open((char*)filename_in.c_str(),   false, inputBitDepths,  inputBitDepths,  outputBitDepths);
  output.open((char*)filename_out.c_str(), true,  outputBitDepths, outputBitDepths, outputBitDepths);

  input.skipFrames(num_frames_skip, width, height, chromaFormatIDC);

  TComPicYuv frame;
  frame.create( width, height, chromaFormatIDC, 1, 1, 0 );

  Int pad[2] = {0, 0};
  TComPicYuv cPicYuvTrueOrg;
  cPicYuvTrueOrg.create( width, height, chromaFormatIDC, 1, 1, 0 );

  UInt num_frames_processed = 0;
  while (!input.isEof())
  {
    if (!input.read(&frame, &cPicYuvTrueOrg, IPCOLOURSPACE_UNCHANGED, pad))
    {
      break;
    }
#if 0
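    // Disabled debug code: blank the luma plane, then mark the first pixel.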
    Pel* img = frame.getAddr(COMPONENT_Y);
    for (Int y = 0; y < height; y++)
    {
      for (Int x = 0; x < width; x++)
        img[x] = 0;
      img += frame.getStride(COMPONENT_Y);
    }
    img = frame.getAddr(COMPONENT_Y);
    img[0] = 1;
#endif
    output.write(&frame, IPCOLOURSPACE_UNCHANGED);

    num_frames_processed++;
    if (num_frames_processed == num_frames)
      break;
  }

  input.close();
  output.close();
  frame.destroy(); // release both picture buffers before returning
  cPicYuvTrueOrg.destroy();

  return EXIT_SUCCESS;
}
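For reference, a hypothetical invocation of this converter (the binary name is illustrative; the option names come from the addOptions() table above), converting an 8-bit 1080p 4:2:0 sequence to 10 bits:

./convert -i input_8bit.yuv -o output_10bit.yuv --SourceWidth=1920 --SourceHeight=1080 --InputBitDepth=8 --OutputBitDepth=10 --ChromaFormat=420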