本文整理汇总了C++中TComPicYuv类的典型用法代码示例。如果您正苦于以下问题:C++ TComPicYuv类的具体用法?C++ TComPicYuv怎么用?C++ TComPicYuv使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了TComPicYuv类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: setLambda
void FrameEncoder::setLambda(int qp, int row)
{
TComSlice* slice = m_pic->getSlice();
TComPicYuv *fenc = slice->getPic()->getPicYuvOrg();
double lambda = 0;
if (m_pic->getSlice()->getSliceType() == I_SLICE)
{
lambda = X265_MAX(1, x265_lambda2_tab_I[qp]);
}
else
{
lambda = X265_MAX(1, x265_lambda2_non_I[qp]);
}
// for RDO
// in RdCost there is only one lambda because the luma and chroma bits are not separated,
// instead we weight the distortion of chroma.
int chromaQPOffset = slice->getPPS()->getChromaCbQpOffset() + slice->getSliceQpDeltaCb();
int qpc = Clip3(0, 70, qp + chromaQPOffset);
double cbWeight = pow(2.0, (qp - g_chromaScale[qpc])); // takes into account of the chroma qp mapping and chroma qp Offset
chromaQPOffset = slice->getPPS()->getChromaCrQpOffset() + slice->getSliceQpDeltaCr();
qpc = Clip3(0, 70, qp + chromaQPOffset);
double crWeight = pow(2.0, (qp - g_chromaScale[qpc])); // takes into account of the chroma qp mapping and chroma qp Offset
double chromaLambda = lambda / crWeight;
m_rows[row].m_search.setQPLambda(qp, lambda, chromaLambda);
m_rows[row].m_search.m_me.setSourcePlane(fenc->getLumaAddr(), fenc->getStride());
m_rows[row].m_rdCost.setLambda(lambda);
m_rows[row].m_rdCost.setCbDistortionWeight(cbWeight);
m_rows[row].m_rdCost.setCrDistortionWeight(crWeight);
}
示例2: updatePixel
// Mark every reconstructed sample of this CTU in the per-component Pixel maps:
// for each component, walk the CTU rectangle (clipped to the picture) and copy
// the reconstructed value into the Pixel entry, flagging it as reconstructed.
Void updatePixel(TComDataCU* pCtu, Pixel** ppPixel)
{
    const UInt uiCtuWidth  = pCtu->getSlice()->getSPS()->getMaxCUWidth();  // max CU width
    const UInt uiCtuHeight = pCtu->getSlice()->getSPS()->getMaxCUHeight(); // max CU height

    TComPic*    pcPic     = pCtu->getPic();
    TComPicYuv* pcRecoYuv = pcPic->getPicYuvRec(); // reconstructed picture buffer

    const UInt uiNumComponents = pcPic->getNumberValidComponents();
    for (UInt ch = 0; ch < uiNumComponents; ch++)
    {
        const ComponentID cId = ComponentID(ch);

        // Per-component geometry (chroma planes are scaled by the component scale factors).
        const UInt uiStride    = pcRecoYuv->getStride(cId);
        const UInt uiPicWidth  = pcRecoYuv->getWidth(cId);
        const UInt uiPicHeight = pcRecoYuv->getHeight(cId);
        const UInt uiPelX      = pCtu->getCUPelX() >> pcRecoYuv->getComponentScaleX(cId); // upper-left x of the CTU
        const UInt uiPelY      = pCtu->getCUPelY() >> pcRecoYuv->getComponentScaleY(cId); // upper-left y of the CTU
        const UInt uiBlkWidth  = uiCtuWidth  >> pcRecoYuv->getComponentScaleX(cId);       // code-block width
        const UInt uiBlkHeight = uiCtuHeight >> pcRecoYuv->getComponentScaleY(cId);       // code-block height

        // Clip the CTU rectangle against the picture boundary.
        const UInt uiLeft   = Clip3((UInt)0, uiPicWidth,  uiPelX);
        const UInt uiTop    = Clip3((UInt)0, uiPicHeight, uiPelY);
        const UInt uiRight  = Clip3((UInt)0, uiPicWidth,  uiPelX + uiBlkWidth);
        const UInt uiBottom = Clip3((UInt)0, uiPicHeight, uiPelY + uiBlkHeight);

        Pel* pBuffer = pcRecoYuv->getAddr(cId);
        for (UInt uiY = uiTop; uiY < uiBottom; uiY++)
        {
            for (UInt uiX = uiLeft; uiX < uiRight; uiX++)
            {
                // Map resampled coordinates back to original-picture coordinates.
                const UInt uiOrgX = g_auiRsmpldToOrg[cId][0][uiX];
                const UInt uiOrgY = g_auiRsmpldToOrg[cId][1][uiY];

                Pixel* pPixel = ppPixel[cId] + getSerialIndex(uiOrgX, uiOrgY, uiPicWidth);
                pPixel->m_bIsRec = true;
                pPixel->m_uiReco = pBuffer[uiY * uiStride + uiX];
            }
        }
    }
}
示例3: xCalcACDCParamSlice
//! Compute per-component DC and AC statistics of the current original picture;
//! the results feed weighted-prediction parameter estimation via setWpAcDcParam.
Void WeightPredAnalysis::xCalcACDCParamSlice(TComSlice *const slice)
{
  //===== calculate AC/DC value =====
  TComPicYuv* pPic = slice->getPic()->getPicYuvOrg();
  WPACDCParam weightACDCParam[MAX_NUM_COMPONENT];

  for (Int comp = 0; comp < pPic->getNumberValidComponents(); comp++)
  {
    const ComponentID compID  = ComponentID(comp);
    const Int         stride  = pPic->getStride(compID);
    const Int         width   = pPic->getWidth(compID);
    const Int         height  = pPic->getHeight(compID);
    const Int         samples = width * height;

    // First pass: plain sum of samples (DC).
    Int64 orgDC = 0;
    {
      const Pel* pRow = pPic->getAddr(compID);
      for (Int y = 0; y < height; y++, pRow += stride)
      {
        for (Int x = 0; x < width; x++)
        {
          orgDC += (Int)pRow[x];
        }
      }
    }

    // Rounded mean, used as the reference level for the AC measure.
    const Int64 orgNormDC = (orgDC + (samples >> 1)) / samples;

    // Second pass: sum of absolute deviations from the mean (AC).
    Int64 orgAC = 0;
    {
      const Pel* pRow = pPic->getAddr(compID);
      for (Int y = 0; y < height; y++, pRow += stride)
      {
        for (Int x = 0; x < width; x++)
        {
          orgAC += abs((Int)pRow[x] - (Int)orgNormDC);
        }
      }
    }

    // With high-precision offsets enabled, the DC is stored at higher precision.
    const Int fixedBitShift = (slice->getSPS()->getSpsRangeExtension().getHighPrecisionOffsetsEnabledFlag()) ? RExt__PREDICTION_WEIGHTING_ANALYSIS_DC_PRECISION : 0;
    weightACDCParam[compID].iDC = (((orgDC << fixedBitShift) + (samples >> 1)) / samples);
    weightACDCParam[compID].iAC = orgAC;
  }

  slice->setWpAcDcParam(weightACDCParam);
}
示例4: calcMD5
/**
 * Compute the MD5 checksum of pic into digest.
 * Planes are hashed in the order Y', Cb, Cr, each in raster order.
 * Samples enter the MD5 in little-endian byte order using as many bytes as
 * the picture bit depth requires: one byte per sample at <= 8 bits, two
 * bytes otherwise.
 */
void calcMD5(TComPicYuv& pic, unsigned char digest[16])
{
    unsigned bitdepth = g_uiBitDepth + g_uiBitIncrement;

    /* pick the plane-packing routine matching the sample byte width */
    typedef void (*MD5PlaneFunc)(MD5&, const Pel*, unsigned, unsigned, unsigned);
    MD5PlaneFunc md5_plane_func = (bitdepth <= 8) ? (MD5PlaneFunc)md5_plane<1>
                                                  : (MD5PlaneFunc)md5_plane<2>;

    MD5 md5;
    unsigned width  = pic.getWidth();
    unsigned height = pic.getHeight();
    unsigned stride = pic.getStride();

    /* luma plane at full resolution */
    md5_plane_func(md5, pic.getLumaAddr(), width, height, stride);

    /* chroma planes: dimensions and stride halved (assumes 4:2:0 layout
     * where the chroma stride is exactly half the luma stride) */
    width  >>= 1;
    height >>= 1;
    stride >>= 1;
    md5_plane_func(md5, pic.getCbAddr(), width, height, stride);
    md5_plane_func(md5, pic.getCrAddr(), width, height, stride);

    md5.finalize(digest);
}
示例5: xCreateLib
// Render the "used pels map" for the synthesized views: reads the base video
// and depth views frame by frame, optionally applies the VSRS temporal depth
// filter, then configures the renderer's shift LUTs per target view.
// NOTE(review): this listing is truncated by the source page; the tail of the
// function (actual rendering, output and buffer cleanup) is not visible here.
Void TAppRendererTop::renderUsedPelsMap( )
{
    xCreateLib();
    xInitLib();

    // Create buffers for the input views (one video + one depth per base view).
    std::vector<TComPicYuv*> apcPicYuvBaseVideo;
    std::vector<TComPicYuv*> apcPicYuvBaseDepth;
    // Previous-frame buffers used by the temporal improvement filter.
    std::vector<TComPicYuv*> apcPicYuvLastBaseVideo;
    std::vector<TComPicYuv*> apcPicYuvLastBaseDepth;

    Int aiPad[2] = { 0, 0 };  // no padding when reading the input YUV

    for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
    {
        TComPicYuv* pcNewVideoPic = new TComPicYuv;
        TComPicYuv* pcNewDepthPic = new TComPicYuv;
        pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
        apcPicYuvBaseVideo.push_back(pcNewVideoPic);
        pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
        apcPicYuvBaseDepth.push_back(pcNewDepthPic);
        // Temporal improvement filter needs last-frame copies as well.
        if ( m_bTempDepthFilter )
        {
            pcNewVideoPic = new TComPicYuv;
            pcNewDepthPic = new TComPicYuv;
            pcNewVideoPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
            apcPicYuvLastBaseVideo.push_back(pcNewVideoPic);
            pcNewDepthPic->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );
            apcPicYuvLastBaseDepth.push_back(pcNewDepthPic);
        }
    }

    // Create the buffer for the synthesized view.
    TComPicYuv* pcPicYuvSynthOut = new TComPicYuv;
    pcPicYuvSynthOut->create( m_iSourceWidth, m_iSourceHeight, 1, 1, 1 );

    Bool bAnyEOS = false;             // set once any input stream hits EOF
    Int iNumOfRenderedFrames = 0;
    Int iFrame = 0;

    // m_iFramesToBeRendered == 0 means "render until EOF".
    while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
    {
        if ( iFrame >= m_iFrameSkip )
        {
            // read in depth and video
            for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
            {
                m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad ) ;
                apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
                bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();

                m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad ) ;
                apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
                bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();

                // VSRS temporal filter; the flag marks the very first filtered frame.
                if ( m_bTempDepthFilter && (iFrame >= m_iFrameSkip) )
                {
                    m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip) );
                }
            }
        }
        else
        {
            std::cout << "Skipping Frame " << iFrame << std::endl;
            iFrame++;
            continue;
        }

        // Advance camera parameters to the current (skip-adjusted) frame.
        m_cCameraData.update( (UInt) ( iFrame - m_iFrameSkip ) );

        for(Int iViewIdx=1; iViewIdx < m_iNumberOfInputViews; iViewIdx++ )
        {
            std::cout << "Rendering UsedPelsMap for Frame " << iFrame << " of View " << (Double) m_cCameraData.getBaseViewNumbers()[iViewIdx] << std::endl;

            // Sorted view indices; the target view must differ from the first view.
            Int iViewSIdx = m_cCameraData.getBaseId2SortedId()[iViewIdx];
            Int iFirstViewSIdx = m_cCameraData.getBaseId2SortedId()[0];
            AOT( iViewSIdx == iFirstViewSIdx );
            Bool bFirstIsLeft = (iFirstViewSIdx < iViewSIdx);

            // Disparity/shift lookup tables for rendering from view 0 to this view.
            m_pcRenTop->setShiftLUTs(
                m_cCameraData.getBaseViewShiftLUTD()[0][iViewIdx],
                m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
                m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
                m_cCameraData.getBaseViewShiftLUTD()[0][iViewIdx],
                m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
                m_cCameraData.getBaseViewShiftLUTI()[0][iViewIdx],
                -1
                );
//......... some of the code is omitted here (truncated source) .........
示例6: main
/**
 * Stand-alone YUV bit-depth converter: reads a raw YUV file (optionally
 * skipping leading frames), converts each frame to the requested output
 * bit depth via TVideoIOYuv, and writes the result.
 * Returns EXIT_SUCCESS on completion, EXIT_FAILURE on missing/bad options.
 */
int main(int argc, const char** argv)
{
    bool do_help;
    string filename_in, filename_out;
    unsigned int width, height;
    unsigned int bitdepth_in, bitdepth_out;
    unsigned int num_frames;
    unsigned int num_frames_skip;

    // Command-line option table.
    po::Options opts;
    opts.addOptions()
    ("help", do_help, false, "this help text")
    ("InputFile,i", filename_in, string(""), "input file to convert")
    ("OutputFile,o", filename_out, string(""), "output file")
    ("SourceWidth", width, 0u, "source picture width")
    ("SourceHeight", height, 0u, "source picture height")
    ("InputBitDepth", bitdepth_in, 8u, "bit-depth of input file")
    ("OutputBitDepth", bitdepth_out, 8u, "bit-depth of output file")
    ("NumFrames", num_frames, 0xffffffffu, "number of frames to process")
    ("FrameSkip,-fs", num_frames_skip, 0u, "Number of frames to skip at start of input YUV")
    ;
    po::setDefaults(opts);
    po::scanArgv(opts, argc, argv);

    if (argc == 1 || do_help)
    {
        /* argc == 1: no options have been specified */
        po::doHelp(cout, opts);
        return EXIT_FAILURE;
    }

    // Open input/output; TVideoIOYuv performs the bit-depth conversion.
    TVideoIOYuv input;
    TVideoIOYuv output;
    input.open((char*)filename_in.c_str(), false, bitdepth_in, bitdepth_in, bitdepth_out, bitdepth_out);
    output.open((char*)filename_out.c_str(), true, bitdepth_out, bitdepth_out, bitdepth_out, bitdepth_out);
    input.skipFrames(num_frames_skip, width, height);

    TComPicYuv frame;
    frame.create( width, height, 1, 1, 0 );
    int pad[2] = {0, 0};

    unsigned int num_frames_processed = 0;
    while (!input.isEof())
    {
        if (! input.read(&frame, pad))
        {
            break;
        }
#if 0
        /* debug aid: blank the luma plane and set a single marker pixel.
         * Fixed: inner bound was "x < height", which is wrong for
         * non-square frames (the loop must cover one row of width pixels). */
        Pel* img = frame.getLumaAddr();
        for (unsigned int y = 0; y < height; y++)
        {
            for (unsigned int x = 0; x < width; x++)
                img[x] = 0;
            img += frame.getStride();
        }
        img = frame.getLumaAddr();
        img[0] = 1;
#endif
        output.write(&frame);
        num_frames_processed++;
        if (num_frames_processed == num_frames)
            break;
    }

    input.close();
    output.close();
    return EXIT_SUCCESS;
}
示例7: PPAScopeEvent
// Compress the current frame: derives the slice lambda and chroma distortion
// weights, propagates them to every CU-row encoder, seeds SAO and adaptive
// search range state, and sets up the output substreams.
// NOTE(review): this listing is truncated by the source page — the function
// continues beyond the last visible line (the actual row compression and
// bitstream emission are not shown, and the two TComOutputBitstream
// allocations below are not freed within the visible portion).
void FrameEncoder::compressFrame()
{
    PPAScopeEvent(FrameEncoder_compressFrame);
    int64_t startCompressTime = x265_mdate();
    TEncEntropy* entropyCoder = getEntropyCoder(0);
    TComSlice* slice = m_pic->getSlice();
    m_nalCount = 0;

    // Lambda table depends on slice type (I vs non-I).
    int qp = slice->getSliceQp();
    double lambda = 0;
    if (slice->getSliceType() == I_SLICE)
    {
        lambda = X265_MAX(1, x265_lambda2_tab_I[qp]);
    }
    else
    {
        lambda = X265_MAX(1, x265_lambda2_non_I[qp]);
    }

    // for RDO
    // in RdCost there is only one lambda because the luma and chroma bits are not separated,
    // instead we weight the distortion of chroma.
    int qpc;
    int chromaQPOffset = slice->getPPS()->getChromaCbQpOffset() + slice->getSliceQpDeltaCb();
    // NOTE(review): qpc is clipped to [0,57] here but to [0,70] in setLambda();
    // confirm which bound matches the size of g_chromaScale.
    qpc = Clip3(0, 57, qp + chromaQPOffset);
    double cbWeight = pow(2.0, (qp - g_chromaScale[qpc])); // takes into account of the chroma qp mapping and chroma qp Offset
    chromaQPOffset = slice->getPPS()->getChromaCrQpOffset() + slice->getSliceQpDeltaCr();
    qpc = Clip3(0, 57, qp + chromaQPOffset);
    double crWeight = pow(2.0, (qp - g_chromaScale[qpc])); // takes into account of the chroma qp mapping and chroma qp Offset
    double chromaLambda = lambda / crWeight;

    // Propagate lambda and distortion weights to every CU-row encoder.
    TComPicYuv *fenc = slice->getPic()->getPicYuvOrg();
    for (int i = 0; i < m_numRows; i++)
    {
        m_rows[i].m_search.setQPLambda(qp, lambda, chromaLambda);
        m_rows[i].m_search.m_me.setSourcePlane(fenc->getLumaAddr(), fenc->getStride());
        m_rows[i].m_rdCost.setLambda(lambda);
        m_rows[i].m_rdCost.setCbDistortionWeight(cbWeight);
        m_rows[i].m_rdCost.setCrDistortionWeight(crWeight);
    }

    m_frameFilter.m_sao.lumaLambda = lambda;
    m_frameFilter.m_sao.chromaLambda = chromaLambda;

    // SAO analysis depth per slice type; deeper for non-referenced B slices.
    switch (slice->getSliceType())
    {
    case I_SLICE:
        m_frameFilter.m_sao.depth = 0;
        break;
    case P_SLICE:
        m_frameFilter.m_sao.depth = 1;
        break;
    case B_SLICE:
        m_frameFilter.m_sao.depth = 2 + !slice->isReferenced();
        break;
    }

    // Reset the per-slice QP deltas before encoding.
    slice->setSliceQpDelta(0);
    slice->setSliceQpDeltaCb(0);
    slice->setSliceQpDeltaCr(0);

    // One substream per CTU row when wavefront parallelism is enabled.
    int numSubstreams = m_cfg->param.bEnableWavefront ? m_pic->getPicSym()->getFrameHeightInCU() : 1;
    // TODO: these two items can likely be FrameEncoder member variables to avoid re-allocs
    TComOutputBitstream* bitstreamRedirect = new TComOutputBitstream;
    TComOutputBitstream* outStreams = new TComOutputBitstream[numSubstreams];

    // Adaptive search range: scale the ME range with the POC distance to each
    // reference picture, clipped to [8, maxSR].
    if (m_cfg->getUseASR() && !slice->isIntra())
    {
        int pocCurr = slice->getPOC();
        int maxSR = m_cfg->param.searchRange;
        int numPredDir = slice->isInterP() ? 1 : 2;
        // NOTE(review): "dir <= numPredDir" iterates one direction past
        // numPredDir (e.g. dir == 2 for B slices); "<" looks intended —
        // verify against the valid domain of getNumRefIdx().
        for (int dir = 0; dir <= numPredDir; dir++)
        {
            for (int refIdx = 0; refIdx < slice->getNumRefIdx(dir); refIdx++)
            {
                int refPOC = slice->getRefPic(dir, refIdx)->getPOC();
                int newSR = Clip3(8, maxSR, (maxSR * ADAPT_SR_SCALE * abs(pocCurr - refPOC) + 4) >> 3);
                for (int i = 0; i < m_numRows; i++)
                {
                    m_rows[i].m_search.setAdaptiveSearchRange(dir, refIdx, newSR);
                }
            }
        }
    }
示例8: GolombCode_Predict_SingleNeighbor
//index== 0 above 1 left 2 above and left 3 above and right
// Golomb-coded residual prediction from a single neighbouring CTU: copies the
// neighbour CTU's residual block into a temporary TComYuv, then greedily
// orders its non-zero samples into a source list, at each step picking the
// sample whose (x,y) displacement from the previous pick has the smallest
// Golomb-coded bit cost. Returns -1 when no residual picture is available.
// NOTE(review): this listing is truncated by the source page — the use of
// srclist, the return value, and the cleanup of the heap-allocated pcTemp
// are not visible here (pcTemp would leak if not deleted in the omitted tail).
Int GolombCode_Predict_SingleNeighbor(TComYuv *pcResiYuv, TComTU& rTu, const ComponentID compID, UInt uiCUHandleAddr, UInt uiAIndex, TCoeff* pcCoeff)
{
    const Bool bIsLuma = isLuma(compID);
    const TComRectangle &rect = rTu.getRect(compID);
    TComDataCU *pcCU = rTu.getCU();
    UInt uiCUAddr = pcCU->getCtuRsAddr();
    //if ((int)uiCUHandleAddr < 0) return -1;

    // Residual picture of the whole frame; bail out if it does not exist.
    TComPicYuv *pcPicYuvResi = pcCU->getPic()->getPicYuvResi();
    if (pcPicYuvResi == NULL) return -1;

    const UInt uiAbsPartIdx = rTu.GetAbsPartIdxTU();
    // const UInt uiZOrder = pcCU->getZorderIdxInCU() +uiAbsPartIdx;
    const UInt uiTrDepth = rTu.GetTransformDepthRelAdj(compID);
    const UInt uiFullDepth = rTu.GetTransformDepthTotal();
    const UInt uiLog2TrSize = rTu.GetLog2LumaTrSize();
    const ChromaFormat chFmt = pcCU->getPic()->getChromaFormat();
    const UInt uiWidth = rect.width;
    const UInt uiHeight = rect.height;
    const UInt uiStride = pcResiYuv->getStride(compID);
    UInt uiAddr = pcCU->getCtuRsAddr();
    // NOTE(review): bIsLuma, uiCUAddr, uiTrDepth, uiFullDepth, uiLog2TrSize,
    // uiStride and uiAddr are unused in the visible portion of this function.

    TComYuv *pcTemp;
    pcTemp = new TComYuv;
    UInt uiSrc1Stride = pcPicYuvResi->getStride(compID);

    // Pixel position of the neighbour CTU (uiCUHandleAddr) plus the TU offset
    // within it, derived via the z-scan -> raster -> pel lookup tables.
    UInt CUPelX, CUPelY;
    CUPelX = (uiCUHandleAddr % pcCU->getPic()->getFrameWidthInCtus()) * g_uiMaxCUWidth;
    CUPelY = (uiCUHandleAddr / pcCU->getPic()->getFrameWidthInCtus()) * g_uiMaxCUHeight;
    CUPelX = CUPelX + g_auiRasterToPelX[g_auiZscanToRaster[uiAbsPartIdx]];
    CUPelY = CUPelY + g_auiRasterToPelY[g_auiZscanToRaster[uiAbsPartIdx]];
    //for(int m=0;m<256;m++) cout<<g_auiZscanToRaster[m] <<" ";cout<<endl;
    //for(int m=0;m<256;m++) cout<<g_auiRasterToPelX[m] <<" ";cout<<endl;
    //for(int m=0;m<256;m++) cout<<g_auiRasterToPelY[m] <<" ";cout<<endl;
    //Pel *pSrc1 = pcPicYuvResi->getAddr(compID) +CUPelY * uiSrc1Stride + CUPelX;
    Pel *pSrc1 = pcPicYuvResi->getAddr(compID, uiCUHandleAddr, uiAbsPartIdx + pcCU->getZorderIdxInCtu());
    /* if( compID != COMPONENT_Y)
    {
    pSrc1 = pcPicYuvResi->getAddr(COMPONENT_Y, uiCUHandleAddr, uiAbsPartIdx + pcCU->getZorderIdxInCU());
    }*/

    // Copy the neighbour's residual block into the local temporary buffer.
    pcTemp->create(uiWidth, uiHeight, chFmt);
    // pcTemp->copyFromPicComponent(compID,pcPicYuvResi,uiCUHandleAddr, pcCU->getZorderIdxInCU()+uiAbsPartIdx);
    UInt uiTempStride = pcTemp->getStride(compID);
    Pel *pTemp = pcTemp->getAddr(compID);
    for (Int y = 0; y < uiHeight; y++)
    {
        for (Int x = 0; x < uiWidth; x++)
        {
            pTemp[x] = pSrc1[x];
        }
        pTemp += uiTempStride;
        pSrc1 += uiSrc1Stride;
    }

    // Greedy traversal state: last picked position/value and the output list.
    int srclx = 0; int srcly = 0; int srclv = 0;
    int srchasleft = 1;
    Pel srcpel;
    int srclist[3][64 * 64];   // rows: x, y, value of each picked sample
    int srcindex = 0;
    memset(srclist, -1, 3 * 64 * 64 * sizeof(int));
    int cursrclistindex = 0;
    Pel* piSrc = pcTemp->getAddr(compID);
    //Pel* piSrc = pcTemp->getAddr(compID, uiAbsPartIdx);
    Pel* pSrc = piSrc;
    //found the source list
    while (srchasleft) {
        // Find the non-zero sample with the cheapest Golomb-coded displacement
        // from the previously picked position (srclx, srcly).
        int ndis = 1000;
        int nx = -1; int ny = -1;
        pSrc = piSrc;
        for (UInt y = 0; y < uiHeight; y++) {
            for (UInt x = 0; x<uiWidth; x++) {
                assert(pSrc[x] >-256 && pSrc[x] < 256);
                if (pSrc[x] != 0) {
                    int dis = 0;
                    dis += getG0Bits((x - srclx));
                    dis += getG0Bits((y - srcly));
                    if (dis < ndis) {
                        nx = x;
                        ny = y;
                        ndis = dis;
                    }
                }
            }
            pSrc += uiTempStride;
        }
        if (nx != -1 && ny != -1) {
            // Record the pick and zero it out so it is not selected again.
            srcpel = *(piSrc + ny*uiTempStride + nx);
            srclx = nx; srcly = ny; srclv = srcpel;
            srclist[0][srcindex] = srclx;
            srclist[1][srcindex] = srcly;
            srclist[2][srcindex] = srcpel;
            srcindex++;
            *(piSrc + ny*uiTempStride + nx) = 0;
        }
        else {
//......... some of the code is omitted here (truncated source) .........
示例9: matchTemplate
// Template matching for PGR prediction: for every pixel of the CTU (per
// component), a hash of the pixel's template is computed and looked up in the
// global table g_pLookupTable to find candidate matches among previously
// coded pixels. Pixels with too little template context, or whose hash bucket
// is empty, fall back to the pixel's default predictor; new templates are
// queued in vInsertList for later insertion into the table.
// NOTE(review): this listing is truncated by the source page — the candidate
// list traversal (pPixelTemplate) and the final use of vInsertList are not
// visible here.
Void matchTemplate(TComDataCU*& rpcTempCU, Pixel** ppPixel)
{
    // template matching
    UInt uiCUPelX = rpcTempCU->getCUPelX(); // x of upper left corner of the cu
    UInt uiCUPelY = rpcTempCU->getCUPelY(); // y of upper left corner of the cu
    UInt uiMaxCUWidth = rpcTempCU->getSlice()->getSPS()->getMaxCUWidth(); // max cu width
    UInt uiMaxCUHeight = rpcTempCU->getSlice()->getSPS()->getMaxCUHeight(); // max cu height

    // pic
    TComPic* pcPic = rpcTempCU->getPic();
    TComPicYuv* pcPredYuv = pcPic->getPicYuvPred();   // prediction plane (written below)
    TComPicYuv* pcResiYuv = pcPic->getPicYuvResi();   // residual plane (written below)
    UInt uiNumValidCopmonent = pcPic->getNumberValidComponents();

    vector<PixelTemplate> vInsertList;  // templates queued for table insertion

    for (UInt ch = 0; ch < uiNumValidCopmonent; ch++)
    {
        // Per-component statistics counters (appear to be debug bookkeeping).
        int all = 0;
        int average = 0;
        int afind = 0;
        int maxfind = 0, minfind = INT_MAX;
        int ax = 0, ay = 0;
        int adiff = 0;

        ComponentID cId = ComponentID(ch);
        // picture description
        UInt uiStride = pcPredYuv->getStride(cId); // stride for a certain component
        UInt uiPicWidth = pcPredYuv->getWidth(cId); // picture width for a certain component
        UInt uiPicHeight = pcPredYuv->getHeight(cId); // picture height for a certain component
        UInt uiCBWidth = uiMaxCUWidth >> (pcPredYuv->getComponentScaleX(cId)); // code block width for a certain component
        UInt uiCBHeight = uiMaxCUHeight >> (pcPredYuv->getComponentScaleY(cId)); // code block height for a certain component

        // rectangle of the code block, clipped to the picture
        UInt uiTopX = Clip3((UInt)0, uiPicWidth, uiCUPelX);
        UInt uiTopY = Clip3((UInt)0, uiPicHeight, uiCUPelY);
        UInt uiBottomX = Clip3((UInt)0, uiPicWidth, uiCUPelX + uiCBWidth);
        UInt uiBottomY = Clip3((UInt)0, uiPicHeight, uiCUPelY + uiCBHeight);

        for (UInt uiY = uiTopY; uiY < uiBottomY; uiY++)
        {
            for (UInt uiX = uiTopX; uiX < uiBottomX; uiX++)
            {
                // Map resampled coordinates back to original-picture coordinates.
                UInt uiOrgX, uiOrgY;
                uiOrgX = g_auiRsmpldToOrg[cId][0][uiX];
                uiOrgY = g_auiRsmpldToOrg[cId][1][uiY];

                // template match
                UInt uiHashValue1, uiHashValue2;
                // get hash values
                getHashValue(uiOrgX, uiOrgY, uiPicWidth, ppPixel[cId],uiHashValue1,uiHashValue2);
                Pixel* pCurPixel = ppPixel[cId] + getSerialIndex(uiOrgX, uiOrgY, uiPicWidth);
                //pCurPixel->m_uiHashValue = uiHashValue1;
                assert(uiHashValue1 >= 0 && uiHashValue1 < MAX_PT_NUM);

                // lookup table bucket for this template hash
                PixelTemplate* pLookupTable = g_pLookupTable[cId][uiHashValue1];
                // number of available template pixels
                UInt uiNumTemplate = getNumTemplate(uiOrgX, uiOrgY, uiPicWidth, ppPixel[cId]);

                // if uiNumTemplate < 1, predict target with default value and do not insert template
                if (uiNumTemplate < 1)
                {
                    UInt uiIdx = uiY * uiStride + uiX;
                    pcPredYuv->getAddr(cId)[uiIdx] = pCurPixel->m_uiPred;
                    pcResiYuv->getAddr(cId)[uiIdx] = pCurPixel->m_iResi = pCurPixel->m_uiOrg - pCurPixel->m_uiPred;
                    continue;
                }

                // if lookuptable is empty, predict target with default value and insert template
                if (pLookupTable == NULL)
                {
                    vInsertList.push_back(PixelTemplate(uiOrgX, uiOrgY, uiHashValue1,uiHashValue2,uiNumTemplate,NEW));
                    UInt uiIdx = uiY*uiStride + uiX;
                    pcPredYuv->getAddr(cId)[uiIdx] = pCurPixel->m_uiPred;
                    pcResiYuv->getAddr(cId)[uiIdx] = pCurPixel->m_iResi = pCurPixel->m_uiOrg - pCurPixel->m_uiPred;
                    continue;
                }

                // Walk the bucket's candidate chain for the best match.
                MatchMetric mmBestMetric;
                UInt uiListLength = 0;
                PixelTemplate* pBestMatch = NULL;
                PixelTemplate* pPixelTemplate = pLookupTable;
#if PGR_DEBUG
                int length = 0;
                int a = 0;
                int find = 0;
                int fx = 0, fy = 0;
                int diff = 0;
#endif
                UInt uiRemoved = 0;
//......... some of the code is omitted here (truncated source) .........
示例10: derivePGRPLT
// Derive the per-CTU PGR palette: builds a histogram of the sample values
// inside the CTU (per component), sorts it by frequency, and inserts the four
// most frequent values that are not already present in the global palette
// g_ppPalette into g_ppCTUPalette.
Void derivePGRPLT(TComDataCU* pcCtu)
{
    TComPic* pcPic = pcCtu->getPic();
    TComPicYuv* pcOrgYuv = pcPic->getPicYuvOrg();
    UInt uiNumValidComponents = pcOrgYuv->getNumberValidComponents();
    UInt uiMaxCUWidth = pcCtu->getSlice()->getSPS()->getMaxCUWidth();
    // Fixed: height was taken from getMaxCUWidth() (copy-paste bug); it must
    // come from getMaxCUHeight().
    UInt uiMaxCUHeight = pcCtu->getSlice()->getSPS()->getMaxCUHeight();
    for (UInt ch = 0; ch < uiNumValidComponents; ch++)
    {
        ComponentID cId = ComponentID(ch);
        // Per-component geometry (chroma planes are subsampled).
        UInt uiPicWidth = pcOrgYuv->getWidth(cId);
        UInt uiPicHeight = pcOrgYuv->getHeight(cId);
        UInt uiStride = pcOrgYuv->getStride(cId);
        UInt uiCUPelX = pcCtu->getCUPelX() >> (pcOrgYuv->getComponentScaleX(cId)); // x of upper left corner of the cu
        UInt uiCUPelY = pcCtu->getCUPelY() >> (pcOrgYuv->getComponentScaleY(cId)); // y of upper left corner of the cu
        UInt uiCBWidth = uiMaxCUWidth >> (pcOrgYuv->getComponentScaleX(cId));
        UInt uiCBHeight = uiMaxCUHeight >> (pcOrgYuv->getComponentScaleY(cId));
        // Clip the block size at the right/bottom picture borders.
        uiCBWidth = Clip3((UInt)0, uiPicWidth - uiCUPelX, uiCBWidth);
        uiCBHeight = Clip3((UInt)0, uiPicHeight - uiCUPelY, uiCBHeight);

        // Histogram of the sample values inside the CTU (256 bins).
        PelCount* pPixelCount[256];
        for (int i = 0; i < 256; i++)
            pPixelCount[i] = new PelCount(i);
        Pel* pOrg = pcOrgYuv->getAddr(cId, pcCtu->getCtuRsAddr());
        for (UInt uiY = 0; uiY < uiCBHeight; uiY++)
        {
            for (UInt uiX = 0; uiX < uiCBWidth; uiX++)
            {
                pPixelCount[pOrg[uiX]]->m_uiCount++;
            }
            pOrg += uiStride;
        }
        // Sort bins by frequency (order defined by cmpPelCount).
        sort(pPixelCount, pPixelCount + 256, cmpPelCount);

        g_ppCTUPalette[cId].m_uiSize = 0;
        // Insert the four most frequent values, skipping any value already
        // present in the global palette.
        for (int i = 0, k = 0; k < 4; i++)
        {
            bool bDuplicate = false;
            for (int j = 0; j < 4; j++)
            {
                // Fixed: the original compared g_ppCTUPalette[cId].m_pEntry[i]
                // (stale data from the previous CTU) against the global
                // palette; the candidate value itself must be compared.
                if (pPixelCount[i]->m_uiVal == g_ppPalette[cId].m_pEntry[j])
                {
                    bDuplicate = true;
                    break;
                }
            }
            if (!bDuplicate)
            {
                g_ppCTUPalette[cId].m_pEntry[k++] = pPixelCount[i]->m_uiVal;
                g_ppCTUPalette[cId].m_uiSize++;
            }
        }

        for (int i = 0; i < 256; i++)
            delete pPixelCount[i];
    }
}
示例11: xPreanalyze
/** Analyze the source picture and compute the local activity measure used
 *  for adaptive-QP decisions. For every AQ layer, each partition's activity
 *  is 1 + the smallest of four variance estimates taken over its quadrants,
 *  and the layer's average activity is stored alongside.
 * \param pcEPic Picture object to be analyzed
 * \return Void
 */
Void TEncPreanalyzer::xPreanalyze( TEncPic* pcEPic )
{
  TComPicYuv* pcPicYuv = pcEPic->getPicYuvOrg();
  const Int width  = pcPicYuv->getWidth(COMPONENT_Y);
  const Int height = pcPicYuv->getHeight(COMPONENT_Y);
  const Int stride = pcPicYuv->getStride(COMPONENT_Y);

  // One pass per AQ depth; each layer has its own partition size.
  for ( UInt depth = 0; depth < pcEPic->getMaxAQDepth(); depth++ )
  {
    const Pel* pStripY = pcPicYuv->getAddr(COMPONENT_Y);  // top of current row strip
    TEncPicQPAdaptationLayer* pcAQLayer = pcEPic->getAQLayer(depth);
    const UInt partWidth  = pcAQLayer->getAQPartWidth();
    const UInt partHeight = pcAQLayer->getAQPartHeight();
    TEncQPAdaptationUnit* pcAQU = pcAQLayer->getQPAdaptationUnit(); // walked linearly across all partitions

    Double sumActivity = 0.0;
    for ( UInt y = 0; y < height; y += partHeight )
    {
      const UInt currPartHeight = min(partHeight, height - y);  // clipped at the bottom border
      for ( UInt x = 0; x < width; x += partWidth, pcAQU++ )
      {
        const UInt currPartWidth = min(partWidth, width - x);   // clipped at the right border
        const Pel* pBlkY = &pStripY[x];

        // Sums and sums-of-squares accumulated separately for the four
        // quadrants (0/1 = top-left/top-right, 2/3 = bottom-left/bottom-right).
        // numPixInPart counts across ALL quadrants, matching the reference
        // implementation's normalization.
        UInt64 sum[4]   = {0, 0, 0, 0};
        UInt64 sumSq[4] = {0, 0, 0, 0};
        UInt numPixInPart = 0;
        UInt by = 0;
        for ( ; by < currPartHeight >> 1; by++ )
        {
          UInt bx = 0;
          for ( ; bx < currPartWidth >> 1; bx++, numPixInPart++ )
          {
            sum[0]   += pBlkY[bx];
            sumSq[0] += pBlkY[bx] * pBlkY[bx];
          }
          for ( ; bx < currPartWidth; bx++, numPixInPart++ )
          {
            sum[1]   += pBlkY[bx];
            sumSq[1] += pBlkY[bx] * pBlkY[bx];
          }
          pBlkY += stride;
        }
        for ( ; by < currPartHeight; by++ )
        {
          UInt bx = 0;
          for ( ; bx < currPartWidth >> 1; bx++, numPixInPart++ )
          {
            sum[2]   += pBlkY[bx];
            sumSq[2] += pBlkY[bx] * pBlkY[bx];
          }
          for ( ; bx < currPartWidth; bx++, numPixInPart++ )
          {
            sum[3]   += pBlkY[bx];
            sumSq[3] += pBlkY[bx] * pBlkY[bx];
          }
          pBlkY += stride;
        }

        // Activity = 1 + minimum of the four variance estimates.
        Double minVariance = DBL_MAX;
        for ( Int i = 0; i < 4; i++ )
        {
          const Double average  = Double(sum[i])   / numPixInPart;
          const Double variance = Double(sumSq[i]) / numPixInPart - average * average;
          minVariance = min(minVariance, variance);
        }
        const Double activity = 1.0 + minVariance;
        pcAQU->setActivity( activity );
        sumActivity += activity;
      }
      pStripY += stride * currPartHeight;
    }

    const Double avgActivity = sumActivity / (pcAQLayer->getNumAQPartInWidth() * pcAQLayer->getNumAQPartInHeight());
    pcAQLayer->setAvgActivity( avgActivity );
  }
}
示例12: main
// Stand-alone YUV converter with chroma-format support: reads a raw YUV file
// (optionally skipping leading frames), converts each frame to the requested
// output bit depth via TVideoIOYuv, and writes the result.
// NOTE(review): this listing is truncated by the source page — the end of the
// processing loop and the file-close/return sequence are not visible.
Int main(Int argc, const char** argv)
{
    Bool do_help;
    string filename_in, filename_out;
    UInt width, height;
    UInt bitdepth_in, bitdepth_out, chromaFormatRaw;
    UInt num_frames;
    UInt num_frames_skip;

    // Command-line option table.
    po::Options opts;
    opts.addOptions()
    ("help", do_help, false, "this help text")
    ("InputFile,i", filename_in, string(""), "input file to convert")
    ("OutputFile,o", filename_out, string(""), "output file")
    ("SourceWidth", width, 0u, "source picture width")
    ("SourceHeight", height, 0u, "source picture height")
    ("InputBitDepth", bitdepth_in, 8u, "bit-depth of input file")
    ("OutputBitDepth", bitdepth_out, 8u, "bit-depth of output file")
    ("ChromaFormat", chromaFormatRaw, 420u, "chroma format. 400, 420, 422 or 444 only")
    ("NumFrames", num_frames, 0xffffffffu, "number of frames to process")
    ("FrameSkip,-fs", num_frames_skip, 0u, "Number of frames to skip at start of input YUV")
    ;
    po::setDefaults(opts);
    po::scanArgv(opts, argc, argv);

    if (argc == 1 || do_help)
    {
        /* argc == 1: no options have been specified */
        po::doHelp(cout, opts);
        return EXIT_FAILURE;
    }

    // Map the numeric chroma-format option onto the internal enum.
    ChromaFormat chromaFormatIDC=CHROMA_420;
    switch (chromaFormatRaw)
    {
    case 400:
        chromaFormatIDC=CHROMA_400;
        break;
    case 420:
        chromaFormatIDC=CHROMA_420;
        break;
    case 422:
        chromaFormatIDC=CHROMA_422;
        break;
    case 444:
        chromaFormatIDC=CHROMA_444;
        break;
    default:
        fprintf(stderr, "Bad chroma format string\n");
        return EXIT_FAILURE;
    }

    // Open input/output; both channel types use the same requested bit depths.
    TVideoIOYuv input;
    TVideoIOYuv output;
    Int inputBitDepths [MAX_NUM_CHANNEL_TYPE];
    Int outputBitDepths[MAX_NUM_CHANNEL_TYPE];
    for (UInt channelTypeIndex = 0; channelTypeIndex < MAX_NUM_CHANNEL_TYPE; channelTypeIndex++)
    {
        inputBitDepths [channelTypeIndex] = bitdepth_in;
        outputBitDepths[channelTypeIndex] = bitdepth_out;
    }
    input.open((char*)filename_in.c_str(), false, inputBitDepths, inputBitDepths, outputBitDepths);
    output.open((char*)filename_out.c_str(), true, outputBitDepths, outputBitDepths, outputBitDepths);
    input.skipFrames(num_frames_skip, width, height, chromaFormatIDC);

    TComPicYuv frame;
    frame.create( width, height, chromaFormatIDC, 1, 1, 0 );
    Int pad[2] = {0, 0};
    TComPicYuv cPicYuvTrueOrg;
    cPicYuvTrueOrg.create( width, height, chromaFormatIDC, 1, 1, 0 );

    UInt num_frames_processed = 0;
    while (!input.isEof())
    {
        if (! input.read(&frame, &cPicYuvTrueOrg, IPCOLOURSPACE_UNCHANGED, pad))
        {
            break;
        }
#if 0
        /* debug aid: blank the luma plane and set a single marker pixel.
         * NOTE(review): the inner bound "x < height" looks wrong — "x < width"
         * appears intended for non-square frames (code is disabled). */
        Pel* img = frame.getAddr(COMPONENT_Y);
        for (Int y = 0; y < height; y++)
        {
            for (Int x = 0; x < height; x++)
                img[x] = 0;
            img += frame.getStride();
        }
        img = frame.getAddr(COMPONENT_Y);
        img[0] = 1;
#endif
        output.write(&frame, IPCOLOURSPACE_UNCHANGED);
        num_frames_processed++;
//......... some of the code is omitted here (truncated source) .........
示例13: xGetNewPicBuffer
// Field-coding entry point: splits each incoming frame into its two fields
// (top/bottom order given by isTff), de-interleaves the source buffers into
// the field picture, optionally runs the adaptive-QP pre-analysis, and
// triggers GOP compression once enough pictures have been received.
// iNumEncoded reports how many pictures were encoded by this call.
Void TEncTop::encode(Bool flush, TComPicYuv* pcPicYuvOrg, TComPicYuv* pcPicYuvTrueOrg, const InputColourSpaceConversion snrCSC, TComList<TComPicYuv*>& rcListPicYuvRecOut, std::list<AccessUnit>& accessUnitsOut, Int& iNumEncoded, Bool isTff)
{
    iNumEncoded = 0;

    // Two fields per frame.
    for (Int fieldNum=0; fieldNum<2; fieldNum++)
    {
        if (pcPicYuvOrg)
        {
            /* -- field initialization -- */
            const Bool isTopField=isTff==(fieldNum==0);

            TComPic *pcField;
            xGetNewPicBuffer( pcField );
            pcField->setReconMark (false); // where is this normally?

            // Ensure a reconstruction output buffer exists for the second field.
            if (fieldNum==1) // where is this normally?
            {
                TComPicYuv* rpcPicYuvRec;

                // org. buffer
                if ( rcListPicYuvRecOut.size() >= (UInt)m_iGOPSize+1 ) // need to maintain field 0 in list of RecOuts while processing field 1. Hence +1 on m_iGOPSize.
                {
                    rpcPicYuvRec = rcListPicYuvRecOut.popFront();
                }
                else
                {
                    rpcPicYuvRec = new TComPicYuv;
                    rpcPicYuvRec->create( m_iSourceWidth, m_iSourceHeight, m_chromaFormatIDC, m_maxCUWidth, m_maxCUHeight, m_maxTotalCUDepth, true);
                }
                rcListPicYuvRecOut.pushBack( rpcPicYuvRec );
            }

            pcField->getSlice(0)->setPOC( m_iPOCLast ); // superfluous?
            pcField->getPicYuvRec()->setBorderExtension(false);// where is this normally?

            pcField->setTopField(isTopField); // interlaced requirement

            // De-interleave every component of the frame buffers into the field's buffers.
            for (UInt componentIndex = 0; componentIndex < pcPicYuvOrg->getNumberValidComponents(); componentIndex++)
            {
                const ComponentID component = ComponentID(componentIndex);
                const UInt stride = pcPicYuvOrg->getStride(component);
                // NOTE(review): 'stride' (taken from pcPicYuvOrg) is also used
                // for the margin offset into pcPicYuvTrueOrg below; this is
                // only correct while both buffers share identical geometry —
                // confirm they are always created with the same dimensions.
                separateFields((pcPicYuvOrg->getBuf(component) + pcPicYuvOrg->getMarginX(component) + (pcPicYuvOrg->getMarginY(component) * stride)),
                    pcField->getPicYuvOrg()->getAddr(component),
                    pcPicYuvOrg->getStride(component),
                    pcPicYuvOrg->getWidth(component),
                    pcPicYuvOrg->getHeight(component),
                    isTopField);

                separateFields((pcPicYuvTrueOrg->getBuf(component) + pcPicYuvTrueOrg->getMarginX(component) + (pcPicYuvTrueOrg->getMarginY(component) * stride)),
                    pcField->getPicYuvTrueOrg()->getAddr(component),
                    pcPicYuvTrueOrg->getStride(component),
                    pcPicYuvTrueOrg->getWidth(component),
                    pcPicYuvTrueOrg->getHeight(component),
                    isTopField);
            }

            // compute image characteristics
            if ( getUseAdaptiveQP() )
            {
                m_cPreanalyzer.xPreanalyze( dynamic_cast<TEncPic*>( pcField ) );
            }
        }

        // Compress a GOP when flushing the last field, at the first frame, or
        // once a full GOP of pictures has been received.
        if ( m_iNumPicRcvd && ((flush&&fieldNum==1) || (m_iPOCLast/2)==0 || m_iNumPicRcvd==m_iGOPSize ) )
        {
            // compress GOP
            m_cGOPEncoder.compressGOP(m_iPOCLast, m_iNumPicRcvd, m_cListPic, rcListPicYuvRecOut, accessUnitsOut, true, isTff, snrCSC, m_printFrameMSE,&m_cSearch);

            iNumEncoded += m_iNumPicRcvd;
            m_uiNumAllPicCoded += m_iNumPicRcvd;
            m_iNumPicRcvd = 0;
        }
    }
}