This article collects typical usage examples of the C++ method TComPicYuv::create. If you have been asking yourself how TComPicYuv::create works, how to call it, or what real uses of it look like, the curated examples below should help. You can also look further into the usage examples of the containing class, TComPicYuv.
Seven code examples of TComPicYuv::create are shown below, sorted by popularity by default.
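Across the examples, two generations of the TComPicYuv::create signature appear: the older HTM-style create( width, height, maxCUWidth, maxCUHeight, maxCUDepth ) in Examples 1 through 5, and a newer HM-style overload that takes an additional ChromaFormat argument in Examples 6 and 7. As a minimal sketch of the allocate-and-use pattern (a hypothetical helper, assuming the older five-argument signature and the usual HM typedefs from TypeDef.h):

// Hypothetical helper, not taken from the examples: allocate a picture
// buffer with the older five-argument create(). Newer HM versions insert
// a ChromaFormat argument after the height (see Examples 6 and 7).
TComPicYuv* allocYuvBuffer( Int iWidth, Int iHeight )
{
  TComPicYuv* pcPic = new TComPicYuv;
  pcPic->create( iWidth, iHeight, 1, 1, 1 ); // width, height, maxCUWidth, maxCUHeight, maxCUDepth
  return pcPic;
}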
Example 1: xRenderModelFromNums
Void TAppRendererTop::xRenderModelFromNums()
{
  xCreateLib();
  xInitLib();

  // Create buffers for input views
  std::vector<TComPicYuv*> apcPicYuvBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvBaseDepth;

  Int aiPad[2] = { 0, 0 };

  // Init model
  TRenModel cCurModel;

  AOT( m_iLog2SamplingFactor != 0 );
  cCurModel.setupPart( 0, m_iSourceHeight );

#if H_3D_VSO_EARLY_SKIP
  cCurModel.create( m_iNumberOfInputViews, m_iNumberOfOutputViews, m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin, false );
#else
  cCurModel.create( m_iNumberOfInputViews, m_iNumberOfOutputViews, m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin );
#endif

  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    TComPicYuv* pcNewVideoPic = new TComPicYuv;
    TComPicYuv* pcNewDepthPic = new TComPicYuv;

    pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseVideo.push_back( pcNewVideoPic );

    pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseDepth.push_back( pcNewDepthPic );
  }

  for ( Int iSynthViewIdx = 0; iSynthViewIdx < m_iNumberOfOutputViews; iSynthViewIdx++ )
  {
    Int  iLeftBaseViewIdx  = -1;
    Int  iRightBaseViewIdx = -1;
    Bool bIsBaseView       = false;
    Int  iRelDistToLeft;

    m_cCameraData.getLeftRightBaseView( iSynthViewIdx, iLeftBaseViewIdx, iRightBaseViewIdx, iRelDistToLeft, bIsBaseView );

    if ( m_iRenderDirection == 1 )
    {
      iRightBaseViewIdx = -1;
      AOT( iLeftBaseViewIdx == -1 );
    }

    if ( m_iRenderDirection == 2 )
    {
      iLeftBaseViewIdx = -1;
      AOT( iRightBaseViewIdx == -1 );
    }

    Int iLeftBaseViewSIdx  = -1;
    Int iRightBaseViewSIdx = -1;

    if ( iLeftBaseViewIdx != -1 )
    {
      iLeftBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iLeftBaseViewIdx];
    }

    if ( iRightBaseViewIdx != -1 )
    {
      iRightBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iRightBaseViewIdx];
    }

    cCurModel.createSingleModel( -1, -1, iSynthViewIdx, iLeftBaseViewSIdx, iRightBaseViewSIdx, false, m_iBlendMode );
  }

  // Create buffer for the synthesized view
  TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
  pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

  Bool bAnyEOS              = false;
  Int  iNumOfRenderedFrames = 0;
  Int  iFrame               = 0;

  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
  {
    if ( iFrame >= m_iFrameSkip )
    {
      // read in depth and video
      for ( Int iBaseViewIdx = 0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
      {
        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad );
        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad );
        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();

        if ( iFrame >= m_iFrameSkip )
        {
          Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx];
          cCurModel.setBaseView( iBaseViewSIdx, apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], NULL, NULL );
        }
      }
//......... remainder of this example omitted .........
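The rendering loop's termination condition is worth noting: m_iFramesToBeRendered == 0 is treated as "render everything until end of stream", and the first m_iFrameSkip frames are consumed before any rendering starts. The predicate in skeleton form (a sketch, not code from the example):

// Sketch of the loop predicate shared by Examples 1-4: a frame budget of 0
// means "keep going until EOF is hit on any input".
Bool wantMoreFrames( Int iRendered, Int iBudget, Bool bAnyEOS )
{
  return ( ( iRendered < iBudget ) || ( iBudget == 0 ) ) && !bAnyEOS;
}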
Example 2: renderUsedPelsMap
Void TAppRendererTop::renderUsedPelsMap()
{
  xCreateLib();
  xInitLib();

  // Create buffers for input views
  std::vector<TComPicYuv*> apcPicYuvBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvBaseDepth;

  // Buffers for the temporal improvement filter
  std::vector<TComPicYuv*> apcPicYuvLastBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvLastBaseDepth;

  Int aiPad[2] = { 0, 0 };

  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    TComPicYuv* pcNewVideoPic = new TComPicYuv;
    TComPicYuv* pcNewDepthPic = new TComPicYuv;

    pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseVideo.push_back( pcNewVideoPic );

    pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseDepth.push_back( pcNewDepthPic );

    // Temporal improvement filter
    if ( m_bTempDepthFilter )
    {
      pcNewVideoPic = new TComPicYuv;
      pcNewDepthPic = new TComPicYuv;

      pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
      apcPicYuvLastBaseVideo.push_back( pcNewVideoPic );

      pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
      apcPicYuvLastBaseDepth.push_back( pcNewDepthPic );
    }
  }

  // Create buffer for the synthesized view
  TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
  pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

  Bool bAnyEOS              = false;
  Int  iNumOfRenderedFrames = 0;
  Int  iFrame               = 0;

  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
  {
    if ( iFrame >= m_iFrameSkip )
    {
      // read in depth and video
      for ( Int iBaseViewIdx = 0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
      {
        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad );
        apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad );
        apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();

        if ( m_bTempDepthFilter && ( iFrame >= m_iFrameSkip ) )
        {
          m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip ) );
        }
      }
    }
    else
    {
      std::cout << "Skipping Frame " << iFrame << std::endl;
      iFrame++;
      continue;
    }

    m_cCameraData.update( (UInt)( iFrame - m_iFrameSkip ) );

    for ( Int iViewIdx = 1; iViewIdx < m_iNumberOfInputViews; iViewIdx++ )
    {
      std::cout << "Rendering UsedPelsMap for Frame " << iFrame << " of View " << (Double)m_cCameraData.getBaseViewNumbers()[iViewIdx] << std::endl;

      Int iViewSIdx      = m_cCameraData.getBaseId2SortedId()[iViewIdx];
      Int iFirstViewSIdx = m_cCameraData.getBaseId2SortedId()[0];

      AOT( iViewSIdx == iFirstViewSIdx );

      Bool bFirstIsLeft = ( iFirstViewSIdx < iViewSIdx );

      m_pcRenTop->setShiftLUTs(
        m_cCameraData.getBaseViewShiftLUTD()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTD()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
        -1
      );
//......... remainder of this example omitted .........
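Unlike Example 1, this variant calls extendPicBorder() after every read: the renderer samples outside the nominal picture area, so the margin allocated by create() must first be filled with valid edge-replicated samples. Read-plus-pad as one step might look like this (a hypothetical helper, assuming the HTM-era TVideoIOYuv::read( TComPicYuv*, Int aiPad[2] ) overload used above):

// Hypothetical helper: fill a buffer from file, replicate its borders into
// the margin, and report whether more frames remain.
Bool readPaddedFrame( TVideoIOYuv* pcIO, TComPicYuv* pcPic, Int aiPad[2] )
{
  pcIO->read( pcPic, aiPad );  // fill the picture area
  pcPic->extendPicBorder();    // pad the margin with edge samples
  return !pcIO->isEof();
}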
Example 3: xRenderModelFromString
Void TAppRendererTop::xRenderModelFromString()
{
  xCreateLib();
  xInitLib();

  // Create buffers for input views
  std::vector<TComPicYuv*> apcPicYuvBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvBaseDepth;

  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    TComPicYuv* pcNewVideoPic = new TComPicYuv;
    TComPicYuv* pcNewDepthPic = new TComPicYuv;

    pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseVideo.push_back( pcNewVideoPic );

    pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseDepth.push_back( pcNewDepthPic );
  }

  Int aiPad[2] = { 0, 0 };

  // Init model
  TRenModel cCurModel;

  AOT( m_iLog2SamplingFactor != 0 );
#if H_3D_VSO_EARLY_SKIP
  cCurModel.create( m_cRenModStrParser.getNumOfBaseViews(), m_cRenModStrParser.getNumOfModels(), m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin, false );
#else
  cCurModel.create( m_cRenModStrParser.getNumOfBaseViews(), m_cRenModStrParser.getNumOfModels(), m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin );
#endif
  cCurModel.setupPart( 0, m_iSourceHeight );

  for ( Int iViewIdx = 0; iViewIdx < m_iNumberOfInputViews; iViewIdx++ )
  {
    Int iNumOfModels = m_cRenModStrParser.getNumOfModelsForView( iViewIdx, 1 );

    for ( Int iCurModel = 0; iCurModel < iNumOfModels; iCurModel++ )
    {
      Int iModelNum; Int iLeftViewNum; Int iRightViewNum; Int iDump; Int iOrgRefNum; Int iBlendMode;
      m_cRenModStrParser.getSingleModelData( iViewIdx, 1, iCurModel, iModelNum, iBlendMode, iLeftViewNum, iRightViewNum, iOrgRefNum, iDump );
      cCurModel.createSingleModel( iViewIdx, 1, iModelNum, iLeftViewNum, iRightViewNum, false, iBlendMode );
    }
  }

  // Create buffer for the synthesized view
  TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
  pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

  Bool bAnyEOS              = false;
  Int  iNumOfRenderedFrames = 0;
  Int  iFrame               = 0;

  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
  {
    if ( iFrame >= m_iFrameSkip )
    {
      // read in depth and video
      for ( Int iBaseViewIdx = 0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
      {
        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad );
        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad );
        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
      }
    }
    else
    {
      iFrame++;
      continue;
    }

    for ( Int iBaseViewIdx = 0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    {
      TComPicYuv* pcPicYuvVideo = apcPicYuvBaseVideo[iBaseViewIdx];
      TComPicYuv* pcPicYuvDepth = apcPicYuvBaseDepth[iBaseViewIdx];

      Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx];
      cCurModel.setBaseView( iBaseViewSIdx, pcPicYuvVideo, pcPicYuvDepth, NULL, NULL );
    }

    m_cCameraData.update( (UInt)( iFrame - m_iFrameSkip ) );

    for ( Int iBaseViewIdx = 0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    {
      // setup virtual views
      Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx];

      cCurModel.setErrorMode( iBaseViewSIdx, 1, 0 );
      Int iNumOfSV = m_cRenModStrParser.getNumOfModelsForView( iBaseViewSIdx, 1 );

      for ( Int iCurView = 0; iCurView < iNumOfSV; iCurView++ )
      {
        Int iOrgRefBaseViewSIdx;
//......... remainder of this example omitted .........
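All three functions so far allocate their view buffers with new plus create(), but the truncated listings never reach the teardown. A typical cleanup, assuming (as elsewhere in HM/HTM) that TComPicYuv::destroy() releases what create() allocated, would be:

// Sketch of the matching teardown (hypothetical helper): destroy() first,
// then delete, for every buffer pushed into the vectors above.
Void freeYuvBuffers( std::vector<TComPicYuv*>& rapcBuffers )
{
  for ( UInt ui = 0; ui < (UInt)rapcBuffers.size(); ui++ )
  {
    rapcBuffers[ui]->destroy();
    delete rapcBuffers[ui];
  }
  rapcBuffers.clear();
}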
Example 4: render
Void TAppRendererTop::render()
{
  xCreateLib();
  xInitLib();

  // Create buffers for input views
  std::vector<TComPicYuv*> apcPicYuvBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvBaseDepth;

  // Buffers for the temporal improvement filter
  std::vector<TComPicYuv*> apcPicYuvLastBaseVideo;
  std::vector<TComPicYuv*> apcPicYuvLastBaseDepth;

  Int aiPad[2] = { 0, 0 };

  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
  {
    TComPicYuv* pcNewVideoPic = new TComPicYuv;
    TComPicYuv* pcNewDepthPic = new TComPicYuv;

    pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseVideo.push_back( pcNewVideoPic );

    pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
    apcPicYuvBaseDepth.push_back( pcNewDepthPic );

    // Temporal improvement filter
    if ( m_bTempDepthFilter )
    {
      pcNewVideoPic = new TComPicYuv;
      pcNewDepthPic = new TComPicYuv;

      pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
      apcPicYuvLastBaseVideo.push_back( pcNewVideoPic );

      pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
      apcPicYuvLastBaseDepth.push_back( pcNewDepthPic );
    }
  }

  // Create buffer for the synthesized view
  TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
  pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

  Bool bAnyEOS              = false;
  Int  iNumOfRenderedFrames = 0;
  Int  iFrame               = 0;

  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
  {
    if ( iFrame >= m_iFrameSkip )
    {
      // read in depth and video
      for ( Int iBaseViewIdx = 0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
      {
        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad );
        apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad );
        apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();

        if ( m_bTempDepthFilter && ( iFrame >= m_iFrameSkip ) )
        {
          m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip ) );
        }
      }
    }
    else
    {
      std::cout << "Skipping Frame " << iFrame << std::endl;
      iFrame++;
      continue;
    }

    m_cCameraData.update( (UInt)iFrame - m_iFrameSkip );

    for ( Int iSynthViewIdx = 0; iSynthViewIdx < m_iNumberOfOutputViews; iSynthViewIdx++ )
    {
      Int  iLeftBaseViewIdx  = -1;
      Int  iRightBaseViewIdx = -1;
      Bool bIsBaseView       = false;
      Int  iRelDistToLeft;

      Bool bHasLRView = m_cCameraData.getLeftRightBaseView( iSynthViewIdx, iLeftBaseViewIdx, iRightBaseViewIdx, iRelDistToLeft, bIsBaseView );
      Bool bHasLView  = ( iLeftBaseViewIdx  != -1 );
      Bool bHasRView  = ( iRightBaseViewIdx != -1 );
      Bool bRender    = true;

      Int iBlendMode      = m_iBlendMode;
      Int iSimEnhBaseView = 0;

      switch ( m_iRenderDirection )
      {
//......... remainder of this example omitted .........
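The listing breaks off at the switch on m_iRenderDirection, so the cases are not shown. Judging from how Example 1 handles the same option, direction 1 keeps only the left base view and direction 2 only the right, with any other value using both. A hypothetical sketch of that dispatch, not the original code:

      // Hypothetical reconstruction, inferred from Example 1's handling
      // of m_iRenderDirection; the real cases are omitted above.
      switch ( m_iRenderDirection )
      {
      case 1:  bHasRView = false; break;  // render from the left base view only
      case 2:  bHasLView = false; break;  // render from the right base view only
      default: break;                     // keep both views and blend
      }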
Example 5: main
int main(int argc, const char** argv)
{
  bool do_help;
  string filename_in, filename_out;
  unsigned int width, height;
  unsigned int bitdepth_in, bitdepth_out;
  unsigned int num_frames;
  unsigned int num_frames_skip;

  po::Options opts;
  opts.addOptions()
  ("help",           do_help,         false,        "this help text")
  ("InputFile,i",    filename_in,     string(""),   "input file to convert")
  ("OutputFile,o",   filename_out,    string(""),   "output file")
  ("SourceWidth",    width,           0u,           "source picture width")
  ("SourceHeight",   height,          0u,           "source picture height")
  ("InputBitDepth",  bitdepth_in,     8u,           "bit-depth of input file")
  ("OutputBitDepth", bitdepth_out,    8u,           "bit-depth of output file")
  ("NumFrames",      num_frames,      0xffffffffu,  "number of frames to process")
  ("FrameSkip,-fs",  num_frames_skip, 0u,           "number of frames to skip at start of input YUV")
  ;
  po::setDefaults(opts);
  po::scanArgv(opts, argc, argv);

  if (argc == 1 || do_help)
  {
    /* argc == 1: no options have been specified */
    po::doHelp(cout, opts);
    return EXIT_FAILURE;
  }

  TVideoIOYuv input;
  TVideoIOYuv output;

  input.open((char*)filename_in.c_str(), false, bitdepth_in, bitdepth_in, bitdepth_out, bitdepth_out);
  output.open((char*)filename_out.c_str(), true, bitdepth_out, bitdepth_out, bitdepth_out, bitdepth_out);

  input.skipFrames(num_frames_skip, width, height);

  TComPicYuv frame;
  frame.create( width, height, 1, 1, 0 );

  int pad[2] = {0, 0};
  unsigned int num_frames_processed = 0;

  while (!input.isEof())
  {
    if (!input.read(&frame, pad))
    {
      break;
    }

#if 0
    /* disabled debug aid: blank the luma plane and mark its first sample */
    Pel* img = frame.getLumaAddr();
    for (int y = 0; y < height; y++)
    {
      for (int x = 0; x < width; x++)
        img[x] = 0;
      img += frame.getStride();
    }
    img = frame.getLumaAddr();
    img[0] = 1;
#endif

    output.write(&frame);
    num_frames_processed++;

    if (num_frames_processed == num_frames)
      break;
  }

  input.close();
  output.close();

  return EXIT_SUCCESS;
}
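Note that this converter never touches the samples itself: the bit-depth change requested via InputBitDepth/OutputBitDepth happens inside TVideoIOYuv, which scales between the file depth and the in-memory depth passed to open(). Conceptually the per-sample operation is just a shift (a standalone sketch, not code from TVideoIOYuv; the real implementation also handles clipping):

// Conceptual sketch of a bit-depth change on a block of samples.
// iShift > 0 widens (e.g. 8 -> 10 bit); iShift < 0 narrows with rounding.
Void scaleSamples( Pel* pBuf, Int iNumSamples, Int iShift )
{
  if ( iShift >= 0 )
  {
    for ( Int i = 0; i < iNumSamples; i++ ) pBuf[i] = pBuf[i] << iShift;
  }
  else
  {
    const Int iRound = 1 << ( -iShift - 1 );
    for ( Int i = 0; i < iNumSamples; i++ ) pBuf[i] = ( pBuf[i] + iRound ) >> -iShift;
  }
}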
Example 6: main
Int main(Int argc, const char** argv)
{
  Bool do_help;
  string filename_in, filename_out;
  UInt width, height;
  UInt bitdepth_in, bitdepth_out, chromaFormatRaw;
  UInt num_frames;
  UInt num_frames_skip;

  po::Options opts;
  opts.addOptions()
  ("help",           do_help,         false,        "this help text")
  ("InputFile,i",    filename_in,     string(""),   "input file to convert")
  ("OutputFile,o",   filename_out,    string(""),   "output file")
  ("SourceWidth",    width,           0u,           "source picture width")
  ("SourceHeight",   height,          0u,           "source picture height")
  ("InputBitDepth",  bitdepth_in,     8u,           "bit-depth of input file")
  ("OutputBitDepth", bitdepth_out,    8u,           "bit-depth of output file")
  ("ChromaFormat",   chromaFormatRaw, 420u,         "chroma format: 400, 420, 422 or 444 only")
  ("NumFrames",      num_frames,      0xffffffffu,  "number of frames to process")
  ("FrameSkip,-fs",  num_frames_skip, 0u,           "number of frames to skip at start of input YUV")
  ;
  po::setDefaults(opts);
  po::scanArgv(opts, argc, argv);

  if (argc == 1 || do_help)
  {
    /* argc == 1: no options have been specified */
    po::doHelp(cout, opts);
    return EXIT_FAILURE;
  }

  ChromaFormat chromaFormatIDC = CHROMA_420;
  switch (chromaFormatRaw)
  {
    case 400: chromaFormatIDC = CHROMA_400; break;
    case 420: chromaFormatIDC = CHROMA_420; break;
    case 422: chromaFormatIDC = CHROMA_422; break;
    case 444: chromaFormatIDC = CHROMA_444; break;
    default:
      fprintf(stderr, "Bad chroma format string\n");
      return EXIT_FAILURE;
  }

  TVideoIOYuv input;
  TVideoIOYuv output;

  Int inputBitDepths [MAX_NUM_CHANNEL_TYPE];
  Int outputBitDepths[MAX_NUM_CHANNEL_TYPE];

  for (UInt channelTypeIndex = 0; channelTypeIndex < MAX_NUM_CHANNEL_TYPE; channelTypeIndex++)
  {
    inputBitDepths [channelTypeIndex] = bitdepth_in;
    outputBitDepths[channelTypeIndex] = bitdepth_out;
  }

  input.open((char*)filename_in.c_str(), false, inputBitDepths, inputBitDepths, outputBitDepths);
  output.open((char*)filename_out.c_str(), true, outputBitDepths, outputBitDepths, outputBitDepths);

  input.skipFrames(num_frames_skip, width, height, chromaFormatIDC);

  TComPicYuv frame;
  frame.create( width, height, chromaFormatIDC, 1, 1, 0 );

  Int pad[2] = {0, 0};

  TComPicYuv cPicYuvTrueOrg;
  cPicYuvTrueOrg.create( width, height, chromaFormatIDC, 1, 1, 0 );

  UInt num_frames_processed = 0;
  while (!input.isEof())
  {
    if (!input.read(&frame, &cPicYuvTrueOrg, IPCOLOURSPACE_UNCHANGED, pad))
    {
      break;
    }

#if 0
    /* disabled debug aid: blank the luma plane and mark its first sample */
    Pel* img = frame.getAddr(COMPONENT_Y);
    for (Int y = 0; y < height; y++)
    {
      for (Int x = 0; x < width; x++)
        img[x] = 0;
      img += frame.getStride(COMPONENT_Y);
    }
    img = frame.getAddr(COMPONENT_Y);
    img[0] = 1;
#endif

    output.write(&frame, IPCOLOURSPACE_UNCHANGED);
    num_frames_processed++;
//......... remainder of this example omitted .........
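The main functional difference from Example 5 is chroma-format awareness: the raw 400/420/422/444 option is mapped to a ChromaFormat enum, which then flows into skipFrames() and both create() calls. The same mapping, factored into a reusable helper (hypothetical function name; the enum values are the ones used above):

// Map the raw ChromaFormat option to the enum expected by create().
Bool parseChromaFormat( UInt uiRaw, ChromaFormat& rFmt )
{
  switch ( uiRaw )
  {
  case 400: rFmt = CHROMA_400; return true;
  case 420: rFmt = CHROMA_420; return true;
  case 422: rFmt = CHROMA_422; return true;
  case 444: rFmt = CHROMA_444; return true;
  default:  return false;  // caller reports "Bad chroma format string"
  }
}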
Example 7: encode
Void TEncTop::encode(Bool flush, TComPicYuv* pcPicYuvOrg, TComPicYuv* pcPicYuvTrueOrg, const InputColourSpaceConversion snrCSC, TComList<TComPicYuv*>& rcListPicYuvRecOut, std::list<AccessUnit>& accessUnitsOut, Int& iNumEncoded, Bool isTff)
{
  iNumEncoded = 0;

  for (Int fieldNum=0; fieldNum<2; fieldNum++)
  {
    if (pcPicYuvOrg)
    {
      /* -- field initialization -- */
      const Bool isTopField = isTff == (fieldNum == 0);

      TComPic *pcField;
      xGetNewPicBuffer( pcField );
      pcField->setReconMark (false);  // where is this normally?

      if (fieldNum == 1)              // where is this normally?
      {
        TComPicYuv* rpcPicYuvRec;

        // org. buffer
        if ( rcListPicYuvRecOut.size() >= (UInt)m_iGOPSize+1 ) // need to maintain field 0 in list of RecOuts while processing field 1. Hence +1 on m_iGOPSize.
        {
          rpcPicYuvRec = rcListPicYuvRecOut.popFront();
        }
        else
        {
          rpcPicYuvRec = new TComPicYuv;
          rpcPicYuvRec->create( m_iSourceWidth, m_iSourceHeight, m_chromaFormatIDC, m_maxCUWidth, m_maxCUHeight, m_maxTotalCUDepth, true );
        }

        rcListPicYuvRecOut.pushBack( rpcPicYuvRec );
      }

      pcField->getSlice(0)->setPOC( m_iPOCLast );         // superfluous?
      pcField->getPicYuvRec()->setBorderExtension(false); // where is this normally?

      pcField->setTopField(isTopField);                   // interlaced requirement

      for (UInt componentIndex = 0; componentIndex < pcPicYuvOrg->getNumberValidComponents(); componentIndex++)
      {
        const ComponentID component = ComponentID(componentIndex);
        const UInt stride = pcPicYuvOrg->getStride(component);

        separateFields((pcPicYuvOrg->getBuf(component) + pcPicYuvOrg->getMarginX(component) + (pcPicYuvOrg->getMarginY(component) * stride)),
                       pcField->getPicYuvOrg()->getAddr(component),
                       pcPicYuvOrg->getStride(component),
                       pcPicYuvOrg->getWidth(component),
                       pcPicYuvOrg->getHeight(component),
                       isTopField);

        separateFields((pcPicYuvTrueOrg->getBuf(component) + pcPicYuvTrueOrg->getMarginX(component) + (pcPicYuvTrueOrg->getMarginY(component) * stride)),
                       pcField->getPicYuvTrueOrg()->getAddr(component),
                       pcPicYuvTrueOrg->getStride(component),
                       pcPicYuvTrueOrg->getWidth(component),
                       pcPicYuvTrueOrg->getHeight(component),
                       isTopField);
      }

      // compute image characteristics
      if ( getUseAdaptiveQP() )
      {
        m_cPreanalyzer.xPreanalyze( dynamic_cast<TEncPic*>( pcField ) );
      }
    }

    if ( m_iNumPicRcvd && ( (flush && fieldNum==1) || (m_iPOCLast/2)==0 || m_iNumPicRcvd==m_iGOPSize ) )
    {
      // compress GOP
      m_cGOPEncoder.compressGOP(m_iPOCLast, m_iNumPicRcvd, m_cListPic, rcListPicYuvRecOut, accessUnitsOut, true, isTff, snrCSC, m_printFrameMSE, &m_cSearch);

      iNumEncoded        += m_iNumPicRcvd;
      m_uiNumAllPicCoded += m_iNumPicRcvd;
      m_iNumPicRcvd       = 0;
    }
  }
}
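Here create() is called with the encoder's full geometry (chroma format, CU dimensions, maximum CU depth) because the buffer joins the reconstruction output list rather than serving as a plain frame store. The surrounding field logic relies on separateFields() to de-interleave a frame into its two fields; conceptually that routine copies every second row, something like this (a simplified standalone sketch; the real separateFields in the encoder may differ in details such as margin handling):

#include <cstring>

// Copy one field (every second row) out of an interleaved frame.
// pSrc points at the top-left sample of the full frame.
Void copyField( const Pel* pSrc, Pel* pDst, Int iSrcStride, Int iDstStride,
                Int iWidth, Int iFieldHeight, Bool bIsTopField )
{
  if ( !bIsTopField )
  {
    pSrc += iSrcStride;        // the bottom field starts one row down
  }
  for ( Int y = 0; y < iFieldHeight; y++ )
  {
    ::memcpy( pDst, pSrc, iWidth * sizeof( Pel ) );
    pSrc += 2 * iSrcStride;    // skip the other field's row
    pDst += iDstStride;
  }
}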