This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter.run. If you are wondering what HFilePrettyPrinter.run does, how to call it, or what real invocations look like, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter.
The sections below show 4 code examples of the HFilePrettyPrinter.run method, sorted by popularity by default.
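Before the full examples, here is a minimal, self-contained sketch of calling HFilePrettyPrinter.run on its own. The class name and file path below are hypothetical placeholders; only the "-m" (print HFile metadata) and "-f" (file to inspect) flags mirror how the examples on this page invoke the method.

import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;

public class HFileDumpExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical path; point this at an existing HFile on HDFS or the local file system.
    String hfilePath = "/tmp/example-hfile";

    HFilePrettyPrinter printer = new HFilePrettyPrinter();
    // "-m" prints the HFile's metadata and "-f" names the file to inspect,
    // the same flags the runMergeWorkload examples below pass to run().
    printer.run(new String[] { "-m", "-f", hfilePath });
  }
}

All four examples that follow share the same pattern: merge a set of input store files into a new HFile, then pass the result's path to HFilePrettyPrinter.run with "-m -f" to print the merged file's metadata.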
Example 1: runMergeWorkload
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the class the method depends on
public void runMergeWorkload() throws IOException {
  long maxKeyCount = prepareForMerge();
  List<StoreFileScanner> scanners =
      StoreFileScanner.getScannersForStoreFiles(inputStoreFiles, false,
          false);

  // Describe the output column family; block size, bloom filter, compression
  // and data block encoding come from the workload configuration.
  HColumnDescriptor columnDescriptor = new HColumnDescriptor(
      HFileReadWriteTest.class.getSimpleName());
  columnDescriptor.setBlocksize(blockSize);
  columnDescriptor.setBloomFilterType(bloomType);
  columnDescriptor.setCompressionType(compression);
  columnDescriptor.setDataBlockEncoding(dataBlockEncoding);

  HRegionInfo regionInfo = new HRegionInfo();
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd,
      null);
  Store store = new Store(outputDir, region, columnDescriptor, fs, conf);

  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf,
      new CacheConfig(conf), fs, blockSize)
          .withOutputDir(outputDir)
          .withCompression(compression)
          .withDataBlockEncoder(dataBlockEncoder)
          .withBloomType(bloomType)
          .withMaxKeyCount(maxKeyCount)
          .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
          .withBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
          .build();

  // Print merge statistics from a background thread while the merge runs.
  StatisticsPrinter statsPrinter = new StatisticsPrinter();
  statsPrinter.startThread();
  try {
    performMerge(scanners, store, writer);
    writer.close();
  } finally {
    statsPrinter.requestStop();
  }

  Path resultPath = writer.getPath();
  resultPath = tryUsingSimpleOutputPath(resultPath);
  long fileSize = fs.getFileStatus(resultPath).getLen();
  LOG.info("Created " + resultPath + ", size " + fileSize);

  // Dump the metadata ("-m") of the merged HFile ("-f <path>").
  System.out.println();
  System.out.println("HFile information for " + resultPath);
  System.out.println();
  HFilePrettyPrinter hfpp = new HFilePrettyPrinter();
  hfpp.run(new String[] { "-m", "-f", resultPath.toString() });
}
Example 2: runMergeWorkload
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the class the method depends on
public void runMergeWorkload() throws IOException {
  long maxKeyCount = prepareForMerge();

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(
      HFileReadWriteTest.class.getSimpleName());
  columnDescriptor.setBlocksize(blockSize);
  columnDescriptor.setBloomFilterType(bloomType);
  columnDescriptor.setCompressionType(compression);
  columnDescriptor.setDataBlockEncoding(dataBlockEncoding);

  HRegionInfo regionInfo = new HRegionInfo();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
  HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd, null);
  HStore store = new HStore(region, columnDescriptor, conf);

  List<StoreFileScanner> scanners =
      StoreFileScanner.getScannersForStoreFiles(inputStoreFiles, false,
          false, region.getReadpoint(IsolationLevel.READ_COMMITTED));
  StoreFile.Writer writer = store.createWriterInTmp(maxKeyCount, compression, false, true, false);

  StatisticsPrinter statsPrinter = new StatisticsPrinter();
  statsPrinter.startThread();
  try {
    performMerge(scanners, store, writer);
    writer.close();
  } finally {
    statsPrinter.requestStop();
  }

  Path resultPath = writer.getPath();
  resultPath = tryUsingSimpleOutputPath(resultPath);
  long fileSize = fs.getFileStatus(resultPath).getLen();
  LOG.info("Created " + resultPath + ", size " + fileSize);

  System.out.println();
  System.out.println("HFile information for " + resultPath);
  System.out.println();
  HFilePrettyPrinter hfpp = new HFilePrettyPrinter();
  hfpp.run(new String[] { "-m", "-f", resultPath.toString() });
}
Example 3: runMergeWorkload
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the class the method depends on
public void runMergeWorkload() throws IOException {
  long maxKeyCount = prepareForMerge();
  List<StoreFileScanner> scanners =
      StoreFileScanner.getScannersForStoreFiles(inputStoreFiles, false,
          false);

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(
      HFileReadWriteTest.class.getSimpleName());
  columnDescriptor.setBlocksize(blockSize);
  columnDescriptor.setBloomFilterType(bloomType);
  columnDescriptor.setCompressionType(compression);
  columnDescriptor.setDataBlockEncoding(dataBlockEncoding);

  HRegionInfo regionInfo = new HRegionInfo();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
  HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd, null);
  HStore store = new HStore(region, columnDescriptor, conf);
  StoreFile.Writer writer = store.createWriterInTmp(maxKeyCount, compression, false, true);

  StatisticsPrinter statsPrinter = new StatisticsPrinter();
  statsPrinter.startThread();
  try {
    performMerge(scanners, store, writer);
    writer.close();
  } finally {
    statsPrinter.requestStop();
  }

  Path resultPath = writer.getPath();
  resultPath = tryUsingSimpleOutputPath(resultPath);
  long fileSize = fs.getFileStatus(resultPath).getLen();
  LOG.info("Created " + resultPath + ", size " + fileSize);

  System.out.println();
  System.out.println("HFile information for " + resultPath);
  System.out.println();
  HFilePrettyPrinter hfpp = new HFilePrettyPrinter();
  hfpp.run(new String[] { "-m", "-f", resultPath.toString() });
}
Example 4: runMergeWorkload
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the class the method depends on
public void runMergeWorkload() throws IOException {
  long maxKeyCount = prepareForMerge();
  List<StoreFileScanner> scanners =
      StoreFileScanner.getScannersForStoreFiles(inputStoreFiles, false,
          false);

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(
      HFileReadWriteTest.class.getSimpleName());
  columnDescriptor.setBlocksize(blockSize);
  columnDescriptor.setBloomFilterType(bloomType);
  columnDescriptor.setCompressionType(compression);
  columnDescriptor.setDataBlockEncoding(dataBlockEncoding);

  HRegionInfo regionInfo = new HRegionInfo();
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd,
      null);
  HStore store = new HStore(outputDir, region, columnDescriptor, fs, conf);

  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf,
      new CacheConfig(conf), fs, blockSize)
          .withOutputDir(outputDir)
          .withCompression(compression)
          .withDataBlockEncoder(dataBlockEncoder)
          .withBloomType(bloomType)
          .withMaxKeyCount(maxKeyCount)
          .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
          .withBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
          .build();

  StatisticsPrinter statsPrinter = new StatisticsPrinter();
  statsPrinter.startThread();
  try {
    performMerge(scanners, store, writer);
    writer.close();
  } finally {
    statsPrinter.requestStop();
  }

  Path resultPath = writer.getPath();
  resultPath = tryUsingSimpleOutputPath(resultPath);
  long fileSize = fs.getFileStatus(resultPath).getLen();
  LOG.info("Created " + resultPath + ", size " + fileSize);

  System.out.println();
  System.out.println("HFile information for " + resultPath);
  System.out.println();
  HFilePrettyPrinter hfpp = new HFilePrettyPrinter();
  hfpp.run(new String[] { "-m", "-f", resultPath.toString() });
}