This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.CacheConfig. If you are wondering what CacheConfig is for, or how and where to use it, the curated examples below should help.
The CacheConfig class belongs to the org.apache.hadoop.hbase.io.hfile package. Fifteen code examples are shown below, ordered by popularity.
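Before the examples, here is a minimal sketch of how a CacheConfig is typically constructed and handed to HFile readers and writers. All calls are taken from the examples below; the class name and the file path are placeholders, and exact signatures vary between HBase versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class CacheConfigQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // One CacheConfig per reader/writer; it bundles the global block cache
    // with the caching flags (cache blooms on write, evict on close, ...).
    CacheConfig cacheConf = new CacheConfig(conf);
    BlockCache blockCache = cacheConf.getBlockCache();
    System.out.println("block cache: " + blockCache);

    // Hypothetical path, for illustration only.
    Path path = new Path("/tmp/example.hfile");
    HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
    try {
      reader.loadFileInfo();
    } finally {
      // Whether blocks are evicted on close is also driven by CacheConfig.
      reader.close(cacheConf.shouldEvictOnClose());
    }
  }
}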
Example 1: createDeleteBloomAtWrite
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 * @param conf the current configuration
 * @param cacheConf the cache configuration for the HFile being written
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled
 *         or one could not be created.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }
  float err = getErrorRate(conf);
  int maxFold = getMaxFold(conf);
  // In the case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
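A hedged caller sketch follows, assuming the method above lives in org.apache.hadoop.hbase.util.BloomFilterFactory as it does in stock HBase. The output path and the enclosing method are placeholders; the other builder calls appear in Examples 3 and 4.

public static void writeWithDeleteFamilyBloom(Configuration conf, FileSystem fs)
    throws IOException {
  CacheConfig cacheConf = new CacheConfig(conf);
  Path path = new Path("/tmp/delete-bloom-demo.hfile");   // placeholder path
  HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, path)
      .withFileContext(new HFileContextBuilder().build())
      .create();
  try {
    // Registers a CompoundBloomFilterWriter as an inline block writer on the
    // HFile writer; returns null when delete-family Blooms are disabled.
    BloomFilterWriter deleteBloom =
        BloomFilterFactory.createDeleteBloomAtWrite(conf, cacheConf, 0, writer);
    // In StoreFile.Writer, the row keys of delete-family cells are fed into
    // deleteBloom while KeyValues are appended; omitted in this sketch.
  } finally {
    writer.close();
  }
}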
Example 2: initRowKeyList
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
private List<byte[]> initRowKeyList(FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  // init: open the temporary bucket and secondary LMD-index store files
  StoreFile bucketStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpBucketFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFile secondaryStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpSecondaryFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFileScanner bucketScanner = getStoreFileScanner(bucketStoreFile);
  StoreFileScanner secondaryScanner = getStoreFileScanner(secondaryStoreFile);
  // get hit buckets
  MDRange[] ranges = getRanges(indexFamilyMap, rangeList);
  List<LMDBucket> bucketList = getBucketRanges(bucketScanner, ranges);
  // scan row keys based on the buckets
  List<byte[]> rowkeyList = getRawRowkeyList(secondaryScanner, bucketList, ranges);
  // clean up
  bucketScanner.close();
  bucketStoreFile.closeReader(true);
  secondaryScanner.close();
  secondaryStoreFile.closeReader(true);
  return rowkeyList;
}
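The getStoreFileScanner(StoreFile) helper used above is not shown in this snippet. A plausible sketch, built only from StoreFile/StoreFile.Reader calls that appear elsewhere on this page (createReader, getStoreFileScanner(boolean, boolean)), might look like the following; it is an assumption, not the project's confirmed implementation.

// Hypothetical sketch of the getStoreFileScanner(StoreFile) helper used above.
private StoreFileScanner getStoreFileScanner(StoreFile storeFile) throws IOException {
  StoreFile.Reader reader = storeFile.createReader();
  // cacheBlocks = true, positional read (pread) = true
  return reader.getStoreFileScanner(true, true);
}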
Example 3: createHFile
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {
  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    // subtract 2 since numRows doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows - 2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
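For completeness, a minimal read-back sketch for the file written above, assuming the same conf/fs fields; every call (createReader, loadFileInfo, getScanner, seekTo, next, close(boolean)) appears in other examples on this page.

private int countKeyValues(Path path) throws IOException {
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
  try {
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner(true, true, false);
    int count = 0;
    if (scanner.seekTo()) {      // false for an empty file
      do {
        count++;
      } while (scanner.next());
    }
    return count;
  } finally {
    reader.close(cacheConf.shouldEvictOnClose());
  }
}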
Example 4: createHFile
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
private static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write rows "1" through "9", using the row key as the value as well
    for (int i = 1; i <= 9; i++) {
      KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
Example 5: createHFile
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
      .withCompression(COMPRESSION)
      .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write numRows rows, all carrying the same value
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
Example 6: testCodecs
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit Maximum number of KeyValues to analyze.
 * @param hfilePath An HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When hfilePath is invalid.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);
  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2
          && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();
  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
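A hypothetical invocation of this utility, assuming testCodecs is hosted by DataBlockEncodingTool itself (as it is in stock HBase). The HFile path and compression name are placeholders.

public class DataBlockEncodingToolDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    DataBlockEncodingTool.testCodecs(conf,
        1000000,               // kvLimit
        "/tmp/demo.hfile",     // hypothetical HFile path
        "gz",                  // compression algorithm to compare against
        true,                  // doBenchmark
        true);                 // doVerify
  }
}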
Example 7: initHRegion
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}.
 * @param tableName name of the table to create
 * @param callingMethod used to build the region directory path
 * @param conf configuration for the region
 * @param family base name of the column families (one per Bloom type)
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for (BloomType bloomType : BLOOM_TYPE) {
    // one column family per Bloom filter type, e.g. "cf_ROW", "cf_ROWCOL"
    HColumnDescriptor familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(bloomType);
    htd.addFamily(familyDesc);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
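A hedged usage sketch for this helper, following the contract in the javadoc above; the table name, family name, and calling-method string are placeholders, and the enclosing test class is assumed to provide the DIR and blockCache fields.

public void testWithRegion() throws IOException {
  HRegion region = initHRegion(Bytes.toBytes("demoTable"), "testWithRegion",
      new HBaseConfiguration(), "cf");
  try {
    // ... exercise the region: put, flush, scan ...
  } finally {
    // Required by the contract documented above.
    HRegion.closeHRegion(region);
  }
}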
Example 8: testCreateWriter
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);
  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();
  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
Example 9: addStoreFile
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
private void addStoreFile() throws IOException {
  StoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = this.store.getMaxSequenceId();
  Configuration c = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(c);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c), fs)
      .withOutputDir(storedir)
      .withFileContext(fileContext)
      .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  LOG.info("Added store file: " + w.getPath());
}
Example 10: extractHFileKey
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
      new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
Example 11: createHFile
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key.
 * TODO put me in an HFileTestUtil or something?
 */
static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withBlockSize(BLOCKSIZE)
      .withCompression(COMPRESSION)
      .withComparator(KeyValue.KEY_COMPARATOR)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
Example 12: createHFile
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key.
 */
public static void createHFile(
    Configuration configuration,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {
  HFileContext meta = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
      .withPath(fs, path)
      .withFileContext(meta)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    // record the bulk load timestamp before closing the writer
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
        Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
Example 13: readHFileSeq
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
private void readHFileSeq(Path file, Compression.Algorithm compression) throws Exception {
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  // sequential scan over the whole file: cacheBlocks = true, pread = true, isCompaction = false
  HFileScanner scanner = reader.getScanner(true, true, false);
  scanner.seekTo();
  @SuppressWarnings("unused")
  KeyValue kv = null;
  while (scanner.next()) {
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }
}
Example 14: readHFileSeqId
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
private void readHFileSeqId(Path file, Compression.Algorithm compression) throws Exception {
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);
  @SuppressWarnings("unused")
  KeyValue kv = null;
  scanner.seekTo();
  // seek to each key by id, in ascending order
  for (int i = 0; i < testSize; i++) {
    scanner.seekTo(getKey(i).getBytes());
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }
}
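The getKey(int) helper used in Example 14 is not shown on this page. A purely illustrative sketch is below; it assumes a fixed-width, zero-padded key so that lexicographic byte order matches the numeric order of the loop, and it is not the project's confirmed implementation.

// Purely illustrative; the real getKey(int) helper is not shown above.
private String getKey(int i) {
  return String.format("key-%010d", i);
}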
Example 15: getRecordWriter
import org.apache.hadoop.hbase.io.hfile.CacheConfig; // import the required package/class
public RecordWriter<BytesWritable, BytesWritable> getRecordWriter(
    TaskAttemptContext context) throws IOException {
  // Get the path of the temporary output file
  final Path outputPath = FileOutputFormat.getOutputPath(context);
  final Path outputDir = new FileOutputCommitter(outputPath, context).getWorkPath();
  final Configuration conf = context.getConfiguration();
  final FileSystem fs = outputDir.getFileSystem(conf);
  int blockSize = conf.getInt(Constants.HFILE_BLOCKSIZE, 16384);
  // Default to snappy.
  Compression.Algorithm compressionAlgorithm = getAlgorithm(
      conf.get(Constants.HFILE_COMPRESSION));
  final StoreFile.Writer writer =
      new StoreFile.WriterBuilder(conf, new CacheConfig(conf), fs, blockSize)
          .withFilePath(hfilePath(outputPath, context.getTaskAttemptID().getTaskID().getId()))
          .withCompression(compressionAlgorithm)
          .build();
  return new HFileRecordWriter(writer);
}
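The getAlgorithm and hfilePath helpers referenced above are not shown. A hedged sketch of what getAlgorithm presumably does, given the "Default to snappy" comment, is below; mapping the config string through the enum name (and a java.util.Locale import) is an assumption, not the project's confirmed implementation.

// Hypothetical sketch of the getAlgorithm helper referenced above.
private static Compression.Algorithm getAlgorithm(String compressionName) {
  if (compressionName == null || compressionName.isEmpty()) {
    return Compression.Algorithm.SNAPPY;   // matches the "Default to snappy" comment
  }
  try {
    return Compression.Algorithm.valueOf(compressionName.toUpperCase(Locale.ROOT));
  } catch (IllegalArgumentException e) {
    // Unknown name: fall back to the default rather than failing the task.
    return Compression.Algorithm.SNAPPY;
  }
}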