This article collects typical usage examples of the C++ method TComPicYuv::getLumaAddr. If you have been struggling with questions such as: how exactly is TComPicYuv::getLumaAddr used? What does TComPicYuv::getLumaAddr look like in real code? then the curated examples here may help. You can also browse further usage examples of the containing class, TComPicYuv.
Five code examples of the TComPicYuv::getLumaAddr method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
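Before diving into the examples, here is a minimal orientation sketch (not one of the examples below; the helper and its setup are assumptions): getLumaAddr() returns a Pel pointer to the top-left sample of the luma plane, and consecutive rows are getStride() samples apart, where the stride may exceed the picture width because of padding.

void clearLuma(TComPicYuv& pic)
{
    Pel* line = pic.getLumaAddr();      // top-left sample of the Y plane
    const int width  = pic.getWidth();
    const int height = pic.getHeight();
    const int stride = pic.getStride(); // row pitch in samples, >= width
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
            line[x] = 0;                // zero one luma row
        line += stride;                 // advance by stride, not width
    }
}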
Example 1: setLambda
void FrameEncoder::setLambda(int qp, int row)
{
    TComSlice* slice = m_pic->getSlice();
    TComPicYuv* fenc = slice->getPic()->getPicYuvOrg();
    double lambda = 0;

    if (slice->getSliceType() == I_SLICE)
    {
        lambda = X265_MAX(1, x265_lambda2_tab_I[qp]);
    }
    else
    {
        lambda = X265_MAX(1, x265_lambda2_non_I[qp]);
    }

    // for RDO
    // in RdCost there is only one lambda because the luma and chroma bits are not separated,
    // instead we weight the distortion of chroma.
    int chromaQPOffset = slice->getPPS()->getChromaCbQpOffset() + slice->getSliceQpDeltaCb();
    int qpc = Clip3(0, 70, qp + chromaQPOffset);
    double cbWeight = pow(2.0, (qp - g_chromaScale[qpc])); // accounts for the chroma QP mapping and chroma QP offset
    chromaQPOffset = slice->getPPS()->getChromaCrQpOffset() + slice->getSliceQpDeltaCr();
    qpc = Clip3(0, 70, qp + chromaQPOffset);
    double crWeight = pow(2.0, (qp - g_chromaScale[qpc])); // accounts for the chroma QP mapping and chroma QP offset
    double chromaLambda = lambda / crWeight;

    m_rows[row].m_search.setQPLambda(qp, lambda, chromaLambda);
    m_rows[row].m_search.m_me.setSourcePlane(fenc->getLumaAddr(), fenc->getStride());
    m_rows[row].m_rdCost.setLambda(lambda);
    m_rows[row].m_rdCost.setCbDistortionWeight(cbWeight);
    m_rows[row].m_rdCost.setCrDistortionWeight(crWeight);
}
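To make the chroma weighting concrete: with hypothetical values qp = 30 and g_chromaScale[qpc] = 29, cbWeight = pow(2.0, 30 - 29) = 2.0, so Cb distortion counts twice as heavily as luma distortion, and chromaLambda = lambda / crWeight correspondingly halves the lambda used for chroma decisions.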
Example 2: calcMD5
/**
 * Calculate the MD5 sum of pic, storing the result in digest.
 * MD5 calculation is performed on Y' then Cb, then Cr; each in raster order.
 * Pel data is inserted into the MD5 function in little-endian byte order,
 * using sufficient bytes to represent the picture bitdepth. E.g., 10-bit data
 * uses little-endian two-byte words; 8-bit data uses single-byte words.
 */
void calcMD5(TComPicYuv& pic, unsigned char digest[16])
{
    unsigned bitdepth = g_uiBitDepth + g_uiBitIncrement;

    /* choose an md5_plane packing function based on the system bitdepth */
    typedef void (*MD5PlaneFunc)(MD5&, const Pel*, unsigned, unsigned, unsigned);
    MD5PlaneFunc md5_plane_func;
    md5_plane_func = bitdepth <= 8 ? (MD5PlaneFunc)md5_plane<1> : (MD5PlaneFunc)md5_plane<2>;

    MD5 md5;
    unsigned width  = pic.getWidth();
    unsigned height = pic.getHeight();
    unsigned stride = pic.getStride();

    md5_plane_func(md5, pic.getLumaAddr(), width, height, stride);

    /* 4:2:0 chroma planes are half the luma dimensions and stride */
    width  >>= 1;
    height >>= 1;
    stride >>= 1;

    md5_plane_func(md5, pic.getCbAddr(), width, height, stride);
    md5_plane_func(md5, pic.getCrAddr(), width, height, stride);
    md5.finalize(digest);
}
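A minimal usage sketch for the function above (the surrounding code is hypothetical; only the calcMD5 signature comes from the example): hash a reconstructed picture and print the 16-byte digest as hex.

#include <cstdio>

void printPictureHash(TComPicYuv& recPic)
{
    unsigned char digest[16];
    calcMD5(recPic, digest);
    for (int i = 0; i < 16; i++)
        printf("%02x", digest[i]); // two lowercase hex digits per byte
    printf("\n");
}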
Example 3: main
int main(int argc, const char** argv)
{
    bool do_help;
    string filename_in, filename_out;
    unsigned int width, height;
    unsigned int bitdepth_in, bitdepth_out;
    unsigned int num_frames;
    unsigned int num_frames_skip;

    po::Options opts;
    opts.addOptions()
    ("help", do_help, false, "this help text")
    ("InputFile,i", filename_in, string(""), "input file to convert")
    ("OutputFile,o", filename_out, string(""), "output file")
    ("SourceWidth", width, 0u, "source picture width")
    ("SourceHeight", height, 0u, "source picture height")
    ("InputBitDepth", bitdepth_in, 8u, "bit-depth of input file")
    ("OutputBitDepth", bitdepth_out, 8u, "bit-depth of output file")
    ("NumFrames", num_frames, 0xffffffffu, "number of frames to process")
    ("FrameSkip,-fs", num_frames_skip, 0u, "number of frames to skip at start of input YUV")
    ;
    po::setDefaults(opts);
    po::scanArgv(opts, argc, argv);

    if (argc == 1 || do_help)
    {
        /* argc == 1: no options have been specified */
        po::doHelp(cout, opts);
        return EXIT_FAILURE;
    }

    TVideoIOYuv input;
    TVideoIOYuv output;

    input.open((char*)filename_in.c_str(), false, bitdepth_in, bitdepth_in, bitdepth_out, bitdepth_out);
    output.open((char*)filename_out.c_str(), true, bitdepth_out, bitdepth_out, bitdepth_out, bitdepth_out);

    input.skipFrames(num_frames_skip, width, height);

    TComPicYuv frame;
    frame.create(width, height, 1, 1, 0);

    int pad[2] = {0, 0};
    unsigned int num_frames_processed = 0;
    while (!input.isEof())
    {
        if (!input.read(&frame, pad))
        {
            break;
        }
#if 0
        /* disabled debug code: blank the luma plane, then mark the first sample */
        Pel* img = frame.getLumaAddr();
        for (unsigned int y = 0; y < height; y++)
        {
            for (unsigned int x = 0; x < width; x++)
                img[x] = 0;
            img += frame.getStride();
        }
        img = frame.getLumaAddr();
        img[0] = 1;
#endif
        output.write(&frame);
        num_frames_processed++;
        if (num_frames_processed == num_frames)
            break;
    }

    input.close();
    output.close();
    return EXIT_SUCCESS;
}
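As a usage note, a build of this tool might be invoked as follows (the binary name is hypothetical; the option names are exactly those registered in the addOptions table above):

./convert -i input_8bit.yuv -o output_10bit.yuv --SourceWidth 1920 --SourceHeight 1080 --InputBitDepth 8 --OutputBitDepth 10 --NumFrames 100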
Example 4: xPreanalyze
/** Analyze source picture and compute local image characteristics used for QP adaptation
 * \param pcEPic Picture object to be analyzed
 * \return Void
 */
Void TEncPreanalyzer::xPreanalyze( TEncPic* pcEPic )
{
    TComPicYuv* pcPicYuv = pcEPic->getPicYuvOrg();
    const Int iWidth  = pcPicYuv->getWidth();
    const Int iHeight = pcPicYuv->getHeight();
    const Int iStride = pcPicYuv->getStride();

    for ( UInt d = 0; d < pcEPic->getMaxAQDepth(); d++ )
    {
        const Pel* pLineY = pcPicYuv->getLumaAddr();
        TEncPicQPAdaptationLayer* pcAQLayer = pcEPic->getAQLayer(d);
        const UInt uiAQPartWidth  = pcAQLayer->getAQPartWidth();
        const UInt uiAQPartHeight = pcAQLayer->getAQPartHeight();
        TEncQPAdaptationUnit* pcAQU = pcAQLayer->getQPAdaptationUnit();

        Double dSumAct = 0.0;
        for ( UInt y = 0; y < iHeight; y += uiAQPartHeight )
        {
            const UInt uiCurrAQPartHeight = min(uiAQPartHeight, iHeight - y);
            for ( UInt x = 0; x < iWidth; x += uiAQPartWidth, pcAQU++ )
            {
                const UInt uiCurrAQPartWidth = min(uiAQPartWidth, iWidth - x);
                const Pel* pBlkY = &pLineY[x];

                /* accumulate sum and sum of squares separately for each of the
                 * four quadrants of the AQ partition */
                UInt64 uiSum  [4] = {0, 0, 0, 0};
                UInt64 uiSumSq[4] = {0, 0, 0, 0};
                UInt uiNumPixInAQPart = 0;
                UInt by = 0;
                for ( ; by < uiCurrAQPartHeight >> 1; by++ )
                {
                    UInt bx = 0;
                    for ( ; bx < uiCurrAQPartWidth >> 1; bx++, uiNumPixInAQPart++ )
                    {
                        uiSum  [0] += pBlkY[bx];
                        uiSumSq[0] += pBlkY[bx] * pBlkY[bx];
                    }
                    for ( ; bx < uiCurrAQPartWidth; bx++, uiNumPixInAQPart++ )
                    {
                        uiSum  [1] += pBlkY[bx];
                        uiSumSq[1] += pBlkY[bx] * pBlkY[bx];
                    }
                    pBlkY += iStride;
                }
                for ( ; by < uiCurrAQPartHeight; by++ )
                {
                    UInt bx = 0;
                    for ( ; bx < uiCurrAQPartWidth >> 1; bx++, uiNumPixInAQPart++ )
                    {
                        uiSum  [2] += pBlkY[bx];
                        uiSumSq[2] += pBlkY[bx] * pBlkY[bx];
                    }
                    for ( ; bx < uiCurrAQPartWidth; bx++, uiNumPixInAQPart++ )
                    {
                        uiSum  [3] += pBlkY[bx];
                        uiSumSq[3] += pBlkY[bx] * pBlkY[bx];
                    }
                    pBlkY += iStride;
                }

                /* the partition's activity is 1 + the minimum quadrant variance */
                Double dMinVar = DBL_MAX;
                for ( Int i = 0; i < 4; i++ )
                {
                    const Double dAverage  = Double(uiSum[i]) / uiNumPixInAQPart;
                    const Double dVariance = Double(uiSumSq[i]) / uiNumPixInAQPart - dAverage * dAverage;
                    dMinVar = min(dMinVar, dVariance);
                }
                const Double dActivity = 1.0 + dMinVar;
                pcAQU->setActivity( dActivity );
                dSumAct += dActivity;
            }
            pLineY += iStride * uiCurrAQPartHeight;
        }

        const Double dAvgAct = dSumAct / (pcAQLayer->getNumAQPartInWidth() * pcAQLayer->getNumAQPartInHeight());
        pcAQLayer->setAvgActivity( dAvgAct );
    }
}
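The per-quadrant statistic above is the classic one-pass variance, var(X) = E[X^2] - E[X]^2, accumulated from a running sum and sum of squares. A standalone sketch of that computation (illustrative only, not part of the HM sources):

#include <cstdint>

double blockVariance(const int16_t* blk, int width, int height, int stride)
{
    uint64_t sum = 0, sumSq = 0;
    for (int y = 0; y < height; y++, blk += stride)
    {
        for (int x = 0; x < width; x++)
        {
            sum   += blk[x];
            sumSq += (uint64_t)blk[x] * blk[x]; // widen before multiplying
        }
    }
    const double n    = double(width) * height;
    const double mean = sum / n;
    return sumSq / n - mean * mean; // E[X^2] - E[X]^2
}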
Example 5: compressFrame
void FrameEncoder::compressFrame()
{
    PPAScopeEvent(FrameEncoder_compressFrame);
    int64_t startCompressTime = x265_mdate();
    TEncEntropy* entropyCoder = getEntropyCoder(0);
    TComSlice* slice = m_pic->getSlice();
    m_nalCount = 0;

    int qp = slice->getSliceQp();
    double lambda = 0;
    if (slice->getSliceType() == I_SLICE)
    {
        lambda = X265_MAX(1, x265_lambda2_tab_I[qp]);
    }
    else
    {
        lambda = X265_MAX(1, x265_lambda2_non_I[qp]);
    }

    // for RDO
    // in RdCost there is only one lambda because the luma and chroma bits are not separated,
    // instead we weight the distortion of chroma.
    int qpc;
    int chromaQPOffset = slice->getPPS()->getChromaCbQpOffset() + slice->getSliceQpDeltaCb();
    qpc = Clip3(0, 57, qp + chromaQPOffset);
    double cbWeight = pow(2.0, (qp - g_chromaScale[qpc])); // accounts for the chroma QP mapping and chroma QP offset
    chromaQPOffset = slice->getPPS()->getChromaCrQpOffset() + slice->getSliceQpDeltaCr();
    qpc = Clip3(0, 57, qp + chromaQPOffset);
    double crWeight = pow(2.0, (qp - g_chromaScale[qpc])); // accounts for the chroma QP mapping and chroma QP offset
    double chromaLambda = lambda / crWeight;

    TComPicYuv* fenc = slice->getPic()->getPicYuvOrg();
    for (int i = 0; i < m_numRows; i++)
    {
        m_rows[i].m_search.setQPLambda(qp, lambda, chromaLambda);
        m_rows[i].m_search.m_me.setSourcePlane(fenc->getLumaAddr(), fenc->getStride());
        m_rows[i].m_rdCost.setLambda(lambda);
        m_rows[i].m_rdCost.setCbDistortionWeight(cbWeight);
        m_rows[i].m_rdCost.setCrDistortionWeight(crWeight);
    }

    m_frameFilter.m_sao.lumaLambda = lambda;
    m_frameFilter.m_sao.chromaLambda = chromaLambda;

    switch (slice->getSliceType())
    {
    case I_SLICE:
        m_frameFilter.m_sao.depth = 0;
        break;
    case P_SLICE:
        m_frameFilter.m_sao.depth = 1;
        break;
    case B_SLICE:
        m_frameFilter.m_sao.depth = 2 + !slice->isReferenced();
        break;
    }

    slice->setSliceQpDelta(0);
    slice->setSliceQpDeltaCb(0);
    slice->setSliceQpDeltaCr(0);

    int numSubstreams = m_cfg->param.bEnableWavefront ? m_pic->getPicSym()->getFrameHeightInCU() : 1;
    // TODO: these two items can likely be FrameEncoder member variables to avoid re-allocs
    TComOutputBitstream* bitstreamRedirect = new TComOutputBitstream;
    TComOutputBitstream* outStreams = new TComOutputBitstream[numSubstreams];

    if (m_cfg->getUseASR() && !slice->isIntra())
    {
        int pocCurr = slice->getPOC();
        int maxSR = m_cfg->param.searchRange;
        int numPredDir = slice->isInterP() ? 1 : 2;

        for (int dir = 0; dir <= numPredDir; dir++)
        {
            for (int refIdx = 0; refIdx < slice->getNumRefIdx(dir); refIdx++)
            {
                int refPOC = slice->getRefPic(dir, refIdx)->getPOC();
                int newSR = Clip3(8, maxSR, (maxSR * ADAPT_SR_SCALE * abs(pocCurr - refPOC) + 4) >> 3);
                for (int i = 0; i < m_numRows; i++)
                {
                    m_rows[i].m_search.setAdaptiveSearchRange(dir, refIdx, newSR);
                }
            }
        }
    }
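For intuition on the adaptive search range logic at the end of the snippet: the configured range is scaled by the POC distance to each reference picture. Assuming ADAPT_SR_SCALE is 1, with maxSR = 64 and a reference two pictures away, newSR = Clip3(8, 64, (64 * 1 * 2 + 4) >> 3) = 16, so nearby references get a much smaller and cheaper motion search window, while distant references keep a range closer to the configured maximum.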