本文整理汇总了Java中org.apache.hadoop.hbase.io.encoding.EncodedDataBlock.getEncodedCompressedSize方法的典型用法代码示例。如果您正苦于以下问题:Java EncodedDataBlock.getEncodedCompressedSize方法的具体用法?Java EncodedDataBlock.getEncodedCompressedSize怎么用?Java EncodedDataBlock.getEncodedCompressedSize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.io.encoding.EncodedDataBlock的用法示例。
在下文中一共展示了EncodedDataBlock.getEncodedCompressedSize方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: displayStatistics
import org.apache.hadoop.hbase.io.encoding.EncodedDataBlock; // import the package/class this method depends on
/**
* Display statistics of different compression algorithms.
* @throws IOException
*/
/**
 * Prints a report comparing raw size, compression-only size, encoding-only
 * size, and encoding-plus-compression size for every configured codec.
 *
 * <p>Writes to {@code System.out} via the {@code outputTuple}/{@code outputTuplePct}/
 * {@code outputSavings} helpers (declared elsewhere in this class).
 *
 * @throws IOException if compressing or encoding the sampled KVs fails
 */
public void displayStatistics() throws IOException {
  // Locale.ROOT keeps the algorithm label stable regardless of the default
  // locale (e.g. avoids the Turkish dotless-i problem with toUpperCase()).
  final String comprAlgo = compressionAlgorithmName.toUpperCase(java.util.Locale.ROOT);
  long rawBytes = totalKeyLength + totalPrefixLength + totalValueLength;
  System.out.println("Raw data size:");
  outputTuple("Raw bytes", INT_FORMAT, rawBytes);
  outputTuplePct("Key bytes", totalKeyLength);
  outputTuplePct("Value bytes", totalValueLength);
  outputTuplePct("KV infrastructure", totalPrefixLength);
  outputTuplePct("CF overhead", totalCFLength);
  outputTuplePct("Total key redundancy", totalKeyRedundancyLength);
  // Baseline: general-purpose compression applied to the unencoded KV bytes.
  int compressedSize = EncodedDataBlock.getCompressedSize(
      compressionAlgorithm, compressor, rawKVs, 0, rawKVs.length);
  outputTuple(comprAlgo + " only size", INT_FORMAT,
      compressedSize);
  outputSavings(comprAlgo + " only", compressedSize, rawBytes);
  System.out.println();
  for (EncodedDataBlock codec : codecs) {
    System.out.println(codec.toString());
    long encodedBytes = codec.getSize();
    outputTuple("Encoded bytes", INT_FORMAT, encodedBytes);
    // "Key encoding" savings exclude value bytes, which encoders leave as-is.
    outputSavings("Key encoding", encodedBytes - totalValueLength,
        rawBytes - totalValueLength);
    outputSavings("Total encoding", encodedBytes, rawBytes);
    int encodedCompressedSize = codec.getEncodedCompressedSize(
        compressionAlgorithm, compressor);
    outputTuple("Encoding + " + comprAlgo + " size", INT_FORMAT,
        encodedCompressedSize);
    outputSavings("Encoding + " + comprAlgo, encodedCompressedSize, rawBytes);
    // Savings of encoding measured against the compression-only baseline.
    outputSavings("Encoding with " + comprAlgo, encodedCompressedSize,
        compressedSize);
    System.out.println();
  }
}
示例2: displayStatistics
import org.apache.hadoop.hbase.io.encoding.EncodedDataBlock; // import the package/class this method depends on
/**
* Display statistics of different compression algorithms.
* @throws IOException
*/
/**
 * Reports, on stdout, how the raw KV bytes shrink under (a) plain
 * compression, (b) each configured block encoding, and (c) each encoding
 * followed by compression, using the class's output helper methods.
 *
 * @throws IOException if a compression or encoding pass fails
 */
public void displayStatistics() throws IOException {
  // Locale.ROOT makes the label independent of the JVM's default locale.
  final String algoLabel = compressionAlgorithmName.toUpperCase(Locale.ROOT);
  final long totalRawBytes = totalKeyLength + totalPrefixLength + totalValueLength;

  // Section 1: breakdown of the uncompressed data.
  System.out.println("Raw data size:");
  outputTuple("Raw bytes", INT_FORMAT, totalRawBytes);
  outputTuplePct("Key bytes", totalKeyLength);
  outputTuplePct("Value bytes", totalValueLength);
  outputTuplePct("KV infrastructure", totalPrefixLength);
  outputTuplePct("CF overhead", totalCFLength);
  outputTuplePct("Total key redundancy", totalKeyRedundancyLength);

  // Section 2: compression-only baseline over the unencoded bytes.
  final int rawCompressedSize = EncodedDataBlock.getCompressedSize(
      compressionAlgorithm, compressor, rawKVs, 0, rawKVs.length);
  outputTuple(algoLabel + " only size", INT_FORMAT, rawCompressedSize);
  outputSavings(algoLabel + " only", rawCompressedSize, totalRawBytes);
  System.out.println();

  // Section 3: per-codec figures, with and without compression on top.
  for (EncodedDataBlock block : codecs) {
    System.out.println(block.toString());
    final long encodedSize = block.getSize();
    outputTuple("Encoded bytes", INT_FORMAT, encodedSize);
    // Value bytes pass through encoders untouched, so key-encoding savings
    // are computed with them subtracted from both sides.
    outputSavings("Key encoding", encodedSize - totalValueLength,
        totalRawBytes - totalValueLength);
    outputSavings("Total encoding", encodedSize, totalRawBytes);
    final int encodedAndCompressedSize = block.getEncodedCompressedSize(
        compressionAlgorithm, compressor);
    outputTuple("Encoding + " + algoLabel + " size", INT_FORMAT,
        encodedAndCompressedSize);
    outputSavings("Encoding + " + algoLabel, encodedAndCompressedSize, totalRawBytes);
    outputSavings("Encoding with " + algoLabel, encodedAndCompressedSize,
        rawCompressedSize);
    System.out.println();
  }
}