本文整理汇总了C#中ManagedCuda.NPP.NPPImage_8uC1.Resize方法的典型用法代码示例。如果您正苦于以下问题：C# NPPImage_8uC1.Resize方法的具体用法？C# NPPImage_8uC1.Resize怎么用？C# NPPImage_8uC1.Resize使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ManagedCuda.NPP.NPPImage_8uC1的用法示例。
在下文中一共展示了NPPImage_8uC1.Resize方法的1个代码示例，这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞，您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: SaveJpeg
public static void SaveJpeg(string aFilename, int aQuality, Bitmap aImage)
{
if (aImage.PixelFormat != System.Drawing.Imaging.PixelFormat.Format24bppRgb)
{
throw new ArgumentException("Only three channel color images are supported.");
}
if (aImage.Width % 16 != 0 || aImage.Height % 16 != 0)
{
throw new ArgumentException("The provided bitmap must have a height and width of a multiple of 16.");
}
JPEGCompression compression = new JPEGCompression();
NPPImage_8uC3 src = new NPPImage_8uC3(aImage.Width, aImage.Height);
NPPImage_8uC1 srcY = new NPPImage_8uC1(aImage.Width, aImage.Height);
NPPImage_8uC1 srcCb = new NPPImage_8uC1(aImage.Width / 2, aImage.Height / 2);
NPPImage_8uC1 srcCr = new NPPImage_8uC1(aImage.Width / 2, aImage.Height / 2);
src.CopyToDevice(aImage);
//System.Drawing.Bitmap is ordered BGR not RGB
//The NPP routine BGR to YCbCR outputs the values in clamped range, following the YCbCr standard.
//But JPEG uses unclamped values ranging all from [0..255], thus use our own color matrix:
float[,] BgrToYCbCr = new float[3, 4]
{{0.114f, 0.587f, 0.299f, 0},
{0.5f, -0.33126f, -0.16874f, 128},
{-0.08131f, -0.41869f, 0.5f, 128}};
src.ColorTwist(BgrToYCbCr);
//Reduce size of of Cb and Cr channel
src.Copy(srcY, 2);
srcY.Resize(srcCr, 0.5, 0.5, InterpolationMode.SuperSampling);
src.Copy(srcY, 1);
srcY.Resize(srcCb, 0.5, 0.5, InterpolationMode.SuperSampling);
src.Copy(srcY, 0);
FrameHeader oFrameHeader = new FrameHeader();
oFrameHeader.nComponents = 3;
oFrameHeader.nHeight = (ushort)aImage.Height;
oFrameHeader.nSamplePrecision = 8;
oFrameHeader.nWidth = (ushort)aImage.Width;
oFrameHeader.aComponentIdentifier = new byte[] { 1, 2, 3 };
oFrameHeader.aSamplingFactors = new byte[] { 34, 17, 17 }; //Y channel is twice the sice of Cb/Cr channel
oFrameHeader.aQuantizationTableSelector = new byte[] { 0, 1, 1 };
//Get quantization tables from JPEG standard with quality scaling
QuantizationTable[] aQuantizationTables = new QuantizationTable[2];
aQuantizationTables[0] = new QuantizationTable(QuantizationTable.QuantizationType.Luminance, aQuality);
aQuantizationTables[1] = new QuantizationTable(QuantizationTable.QuantizationType.Chroma, aQuality);
CudaDeviceVariable<byte>[] pdQuantizationTables = new CudaDeviceVariable<byte>[2];
pdQuantizationTables[0] = aQuantizationTables[0].aTable;
pdQuantizationTables[1] = aQuantizationTables[1].aTable;
//Get Huffman tables from JPEG standard
HuffmanTable[] aHuffmanTables = new HuffmanTable[4];
aHuffmanTables[0] = new HuffmanTable(HuffmanTable.HuffmanType.LuminanceDC);
aHuffmanTables[1] = new HuffmanTable(HuffmanTable.HuffmanType.ChromaDC);
aHuffmanTables[2] = new HuffmanTable(HuffmanTable.HuffmanType.LuminanceAC);
aHuffmanTables[3] = new HuffmanTable(HuffmanTable.HuffmanType.ChromaAC);
//Set header
ScanHeader oScanHeader = new ScanHeader();
oScanHeader.nA = 0;
oScanHeader.nComponents = 3;
oScanHeader.nSe = 63;
oScanHeader.nSs = 0;
oScanHeader.aComponentSelector = new byte[] { 1, 2, 3 };
oScanHeader.aHuffmanTablesSelector = new byte[] { 0, 17, 17 };
NPPImage_16sC1[] apdDCT = new NPPImage_16sC1[3];
NPPImage_8uC1[] apDstImage = new NPPImage_8uC1[3];
NppiSize[] aDstSize = new NppiSize[3];
aDstSize[0] = new NppiSize(srcY.Width, srcY.Height);
aDstSize[1] = new NppiSize(srcCb.Width, srcCb.Height);
aDstSize[2] = new NppiSize(srcCr.Width, srcCr.Height);
// Compute channel sizes as stored in the output JPEG (8x8 blocks & MCU block layout)
NppiSize oDstImageSize = new NppiSize();
float frameWidth = (float)Math.Floor((float)oFrameHeader.nWidth);
float frameHeight = (float)Math.Floor((float)oFrameHeader.nHeight);
oDstImageSize.width = (int)Math.Max(1.0f, frameWidth);
oDstImageSize.height = (int)Math.Max(1.0f, frameHeight);
//Console.WriteLine("Output Size: " + oDstImageSize.width + "x" + oDstImageSize.height + "x" + (int)(oFrameHeader.nComponents));
apDstImage[0] = srcY;
apDstImage[1] = srcCb;
apDstImage[2] = srcCr;
int nMCUBlocksH = 0;
int nMCUBlocksV = 0;
// Compute channel sizes as stored in the JPEG (8x8 blocks & MCU block layout)
for (int i = 0; i < oFrameHeader.nComponents; ++i)
{
nMCUBlocksV = Math.Max(nMCUBlocksV, oFrameHeader.aSamplingFactors[i] >> 4);
//.........这里部分代码省略.........