本文整理汇总了Java中org.apache.hadoop.hbase.io.hfile.HFile.FileInfo.get方法的典型用法代码示例。如果您正苦于以下问题:Java FileInfo.get方法的具体用法?Java FileInfo.get怎么用?Java FileInfo.get使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.io.hfile.HFile.FileInfo的用法示例。
在下文中一共展示了FileInfo.get方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createFromFileInfo
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Builds the data block encoder recorded in the given file info.
 *
 * @param fileInfo file-info map of the HFile being opened
 * @return {@link NoOpDataBlockEncoder#INSTANCE} when no encoding (or
 *         {@code NONE}) is recorded, otherwise an encoder for the
 *         recorded encoding
 * @throws IOException if the recorded encoding name is not a known
 *         {@link DataBlockEncoding} value
 */
public static HFileDataBlockEncoder createFromFileInfo(
    FileInfo fileInfo) throws IOException {
  // Absence of the key means the file was written without block encoding.
  DataBlockEncoding encoding = DataBlockEncoding.NONE;
  byte[] encodingBytes = fileInfo.get(DATA_BLOCK_ENCODING);
  if (encodingBytes != null) {
    String encodingName = Bytes.toString(encodingBytes);
    try {
      encoding = DataBlockEncoding.valueOf(encodingName);
    } catch (IllegalArgumentException ex) {
      throw new IOException("Invalid data block encoding type in file info: "
          + encodingName, ex);
    }
  }
  return encoding == DataBlockEncoding.NONE
      ? NoOpDataBlockEncoder.INSTANCE
      : new HFileDataBlockEncoderImpl(encoding);
}
示例2: createFromFileInfo
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Builds the data block encoder for a file, reconciling the on-disk
 * encoding recorded in the file info with the encoding preferred for the
 * block cache.
 *
 * @param fileInfo file-info map of the HFile being opened
 * @param preferredEncodingInCache encoding the column family would like to
 *          use in cache; may be {@code null} or {@code NONE}
 * @return an encoder pairing the on-disk and in-cache encodings, or
 *         {@link NoOpDataBlockEncoder#INSTANCE} when neither side wants one
 * @throws IOException if the recorded encoding name is not a known
 *         {@link DataBlockEncoding} value
 */
public static HFileDataBlockEncoder createFromFileInfo(
    FileInfo fileInfo, DataBlockEncoding preferredEncodingInCache)
    throws IOException {
  boolean wantsCacheEncoding = preferredEncodingInCache != null
      && preferredEncodingInCache != DataBlockEncoding.NONE;
  byte[] encodingBytes = fileInfo.get(DATA_BLOCK_ENCODING);
  if (encodingBytes == null && !wantsCacheEncoding) {
    // Nothing recorded on disk and nothing requested for cache.
    return NoOpDataBlockEncoder.INSTANCE;
  }
  DataBlockEncoding onDisk = DataBlockEncoding.NONE;
  if (encodingBytes != null) {
    String encodingName = Bytes.toString(encodingBytes);
    try {
      onDisk = DataBlockEncoding.valueOf(encodingName);
    } catch (IllegalArgumentException ex) {
      throw new IOException("Invalid data block encoding type in file info: "
          + encodingName, ex);
    }
  }
  // NONE on disk marks an "in-cache-only" (or fully unencoded) file, so the
  // column family's preferred cache encoding applies. Otherwise blocks are
  // cached exactly as encoded on disk: after a CF encoding switch, old files
  // keep their encoding everywhere while new files pick up the new one.
  DataBlockEncoding inCache =
      onDisk == DataBlockEncoding.NONE ? preferredEncodingInCache : onDisk;
  return new HFileDataBlockEncoderImpl(onDisk, inCache);
}
示例3: createFromFileInfo
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Creates the data block encoder recorded in the file info, pairing the
 * on-disk encoding with the encoding to use in the block cache.
 *
 * @param fileInfo file-info map of the HFile being opened
 * @param preferredEncodingInCache encoding the column family would like to
 *          use in cache when the file itself is unencoded on disk
 * @return {@link NoOpDataBlockEncoder#INSTANCE} when no encoding key is
 *         recorded, otherwise an encoder for the recorded/preferred pair
 * @throws IOException if the recorded encoding name is not a known
 *         {@link DataBlockEncoding} value
 */
public static HFileDataBlockEncoder createFromFileInfo(
    FileInfo fileInfo, DataBlockEncoding preferredEncodingInCache)
    throws IOException {
  byte[] encodingBytes = fileInfo.get(DATA_BLOCK_ENCODING);
  if (encodingBytes == null) {
    // No encoding key recorded at all: nothing to decode, nothing to cache.
    return NoOpDataBlockEncoder.INSTANCE;
  }
  String encodingName = Bytes.toString(encodingBytes);
  DataBlockEncoding onDisk;
  try {
    onDisk = DataBlockEncoding.valueOf(encodingName);
  } catch (IllegalArgumentException ex) {
    throw new IOException("Invalid data block encoding type in file info: " +
        encodingName, ex);
  }
  // NONE on disk marks an "in-cache-only" (or fully unencoded) file, so the
  // column family's preferred cache encoding applies; otherwise blocks stay
  // cached the same way they are encoded on disk, so old files keep their
  // encoding after a CF-level encoding change while new files use the new one.
  DataBlockEncoding inCache =
      onDisk == DataBlockEncoding.NONE ? preferredEncodingInCache : onDisk;
  return new HFileDataBlockEncoderImpl(onDisk, inCache);
}
示例4: HFileReaderV2
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Opens a HFile. You must load the index before you can use it by calling
 * {@link #loadFileInfo()}.
 *
 * @param path Path to HFile.
 * @param trailer File trailer.
 * @param fsdis input stream. Caller is responsible for closing the passed
 *          stream.
 * @param fsdisNoFsChecksum second input stream, presumably one that skips
 *          filesystem-level checksum verification (name-based; confirm
 *          against HFileBlock.FSReaderV2).
 * @param size Length of the stream.
 * @param closeIStream Whether to close the stream.
 * @param cacheConf Cache configuration.
 * @param preferredEncodingInCache the encoding to use in cache in case we
 *          have a choice. If the file is already encoded on disk, we will
 *          still use its on-disk encoding in cache.
 * @param hfs the HFileSystem the streams were opened from.
 * @throws IOException if the trailer, block indexes, or file info cannot
 *          be read from the stream.
 */
public HFileReaderV2(Path path, FixedFileTrailer trailer,
final FSDataInputStream fsdis, final FSDataInputStream fsdisNoFsChecksum,
final long size,
final boolean closeIStream, final CacheConfig cacheConf,
DataBlockEncoding preferredEncodingInCache, final HFileSystem hfs)
throws IOException {
super(path, trailer, fsdis, fsdisNoFsChecksum, size,
closeIStream, cacheConf, hfs);
// Only major version 2 files are handled here; minor version is range-checked.
trailer.expectMajorVersion(2);
validateMinorVersion(path, trailer.getMinorVersion());
HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis,
fsdisNoFsChecksum,
compressAlgo, fileSize, trailer.getMinorVersion(), hfs, path);
this.fsBlockReader = fsBlockReaderV2; // upcast
// Comparator class name is stored in the trailer in version 2.
comparator = trailer.createComparator();
dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
trailer.getNumDataIndexLevels(), this);
metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
Bytes.BYTES_RAWCOMPARATOR, 1);
// Parse load-on-open data. NOTE: the blocks below must be consumed from
// blockIter in exactly this order (data index, meta index, file info),
// matching the order the writer laid them out.
HFileBlock.BlockIterator blockIter = fsBlockReaderV2.blockRange(
trailer.getLoadOnOpenDataOffset(),
fileSize - trailer.getTrailerSize());
// Data index. We also read statistics about the block index written after
// the root level.
dataBlockIndexReader.readMultiLevelIndexRoot(
blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
trailer.getDataIndexCount());
// Meta index.
metaBlockIndexReader.readRootIndex(
blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
trailer.getMetaIndexCount());
// File info
fileInfo = new FileInfo();
fileInfo.readFields(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
// Files written with memstore timestamps carry an extra varlong per KV;
// the KV format version flag in the file info tells us whether to decode it.
byte [] keyValueFormatVersion =
fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
includesMemstoreTS = keyValueFormatVersion != null &&
Bytes.toInt(keyValueFormatVersion) ==
HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE;
fsBlockReaderV2.setIncludesMemstoreTS(includesMemstoreTS);
if (includesMemstoreTS) {
// Only bother decoding memstore timestamps if any KV actually has one
// (max memstore TS recorded for the file is > 0).
decodeMemstoreTS = Bytes.toLong(fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY)) > 0;
}
// Read data block encoding algorithm name from file info.
dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo,
preferredEncodingInCache);
fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);
// Store all other load-on-open blocks for further consumption.
HFileBlock b;
while ((b = blockIter.nextBlock()) != null) {
loadOnOpenBlocks.add(b);
}
}
示例5: HFileReaderV2
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Opens a HFile. You must load the index before you can use it by calling
 * {@link #loadFileInfo()}.
 *
 * @param path Path to HFile.
 * @param trailer File trailer.
 * @param fsdis input stream. Caller is responsible for closing the passed
 *          stream.
 * @param size Length of the stream.
 * @param closeIStream Whether to close the stream.
 * @param cacheConf Cache configuration.
 * @throws IOException if the trailer, block indexes, or file info cannot
 *          be read from the stream.
 */
public HFileReaderV2(Path path, FixedFileTrailer trailer,
final FSDataInputStream fsdis, final long size,
final boolean closeIStream, final CacheConfig cacheConf)
throws IOException {
super(path, trailer, fsdis, size, closeIStream, cacheConf);
// Only version-2 files are handled by this reader.
trailer.expectVersion(2);
fsBlockReader = new HFileBlock.FSReaderV2(fsdis, compressAlgo,
fileSize);
// Comparator class name is stored in the trailer in version 2.
comparator = trailer.createComparator();
dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
trailer.getNumDataIndexLevels(), this);
metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
Bytes.BYTES_RAWCOMPARATOR, 1);
// Parse load-on-open data. NOTE: the blocks below must be consumed from
// blockIter in exactly this order (data index, meta index, file info),
// matching the order the writer laid them out.
HFileBlock.BlockIterator blockIter = fsBlockReader.blockRange(
trailer.getLoadOnOpenDataOffset(),
fileSize - trailer.getTrailerSize());
// Data index. We also read statistics about the block index written after
// the root level.
dataBlockIndexReader.readMultiLevelIndexRoot(
blockIter.nextBlockAsStream(BlockType.ROOT_INDEX),
trailer.getDataIndexCount());
// Meta index.
metaBlockIndexReader.readRootIndex(
blockIter.nextBlockAsStream(BlockType.ROOT_INDEX),
trailer.getMetaIndexCount());
// File info
fileInfo = new FileInfo();
fileInfo.readFields(blockIter.nextBlockAsStream(BlockType.FILE_INFO));
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
// Files written with memstore timestamps carry an extra varlong per KV;
// the KV format version flag in the file info tells us whether to decode it.
byte [] keyValueFormatVersion = fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
includesMemstoreTS = (keyValueFormatVersion != null &&
Bytes.toInt(keyValueFormatVersion) == HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE);
// Store all other load-on-open blocks for further consumption.
HFileBlock b;
while ((b = blockIter.nextBlock()) != null) {
loadOnOpenBlocks.add(b);
}
}
示例6: HFileReaderV2
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Opens a HFile. You must load the index before you can use it by calling
 * {@link #loadFileInfo()}.
 *
 * <p>Unlike the variants that take a {@code preferredEncodingInCache}, this
 * constructor derives the data block encoding solely from the file info.
 *
 * @param path Path to HFile.
 * @param trailer File trailer.
 * @param fsdis input stream.
 * @param size Length of the stream.
 * @param cacheConf Cache configuration.
 * @param hfs the HFileSystem the stream was opened from.
 * @throws IOException if the trailer, block indexes, or file info cannot
 *          be read from the stream.
 */
public HFileReaderV2(Path path, FixedFileTrailer trailer,
final FSDataInputStreamWrapper fsdis, final long size, final CacheConfig cacheConf,
final HFileSystem hfs) throws IOException {
super(path, trailer, size, cacheConf, hfs);
// Only major version 2 files are handled here; minor version is range-checked.
trailer.expectMajorVersion(2);
validateMinorVersion(path, trailer.getMinorVersion());
HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis,
compressAlgo, fileSize, trailer.getMinorVersion(), hfs, path);
this.fsBlockReader = fsBlockReaderV2; // upcast
// Comparator class name is stored in the trailer in version 2.
comparator = trailer.createComparator();
dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
trailer.getNumDataIndexLevels(), this);
metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
KeyValue.RAW_COMPARATOR, 1);
// Parse load-on-open data. NOTE: the blocks below must be consumed from
// blockIter in exactly this order (data index, meta index, file info),
// matching the order the writer laid them out.
HFileBlock.BlockIterator blockIter = fsBlockReaderV2.blockRange(
trailer.getLoadOnOpenDataOffset(),
fileSize - trailer.getTrailerSize());
// Data index. We also read statistics about the block index written after
// the root level.
dataBlockIndexReader.readMultiLevelIndexRoot(
blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
trailer.getDataIndexCount());
// Meta index.
metaBlockIndexReader.readRootIndex(
blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
trailer.getMetaIndexCount());
// File info
fileInfo = new FileInfo();
fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
// Files written with memstore timestamps carry an extra varlong per KV;
// the KV format version flag in the file info tells us whether to decode it.
byte [] keyValueFormatVersion =
fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
includesMemstoreTS = keyValueFormatVersion != null &&
Bytes.toInt(keyValueFormatVersion) ==
HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE;
fsBlockReaderV2.setIncludesMemstoreTS(includesMemstoreTS);
if (includesMemstoreTS) {
// Only bother decoding memstore timestamps if any KV actually has one
// (max memstore TS recorded for the file is > 0).
decodeMemstoreTS = Bytes.toLong(fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY)) > 0;
}
// Read data block encoding algorithm name from file info.
dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);
// Store all other load-on-open blocks for further consumption.
HFileBlock b;
while ((b = blockIter.nextBlock()) != null) {
loadOnOpenBlocks.add(b);
}
}
示例7: HFileReaderV2
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Opens a HFile. You must load the index before you can use it by calling
 * {@link #loadFileInfo()}.
 *
 * @param path Path to HFile.
 * @param trailer File trailer.
 * @param fsdis input stream. Caller is responsible for closing the passed
 *          stream.
 * @param fsdisNoFsChecksum second input stream, presumably one that skips
 *          filesystem-level checksum verification (name-based; confirm
 *          against HFileBlock.FSReaderV2).
 * @param size Length of the stream.
 * @param closeIStream Whether to close the stream.
 * @param cacheConf Cache configuration.
 * @param preferredEncodingInCache the encoding to use in cache in case we
 *          have a choice. If the file is already encoded on disk, we will
 *          still use its on-disk encoding in cache.
 * @param hfs the HFileSystem the streams were opened from.
 * @throws IOException if the trailer, block indexes, or file info cannot
 *          be read from the stream.
 */
public HFileReaderV2(Path path, FixedFileTrailer trailer,
final FSDataInputStream fsdis, final FSDataInputStream fsdisNoFsChecksum,
final long size,
final boolean closeIStream, final CacheConfig cacheConf,
DataBlockEncoding preferredEncodingInCache, final HFileSystem hfs)
throws IOException {
super(path, trailer, fsdis, fsdisNoFsChecksum, size,
closeIStream, cacheConf, hfs);
// Only major version 2 files are handled here; minor version is range-checked.
trailer.expectMajorVersion(2);
validateMinorVersion(path, trailer.getMinorVersion());
HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis,
fsdisNoFsChecksum,
compressAlgo, fileSize, trailer.getMinorVersion(), hfs, path);
this.fsBlockReader = fsBlockReaderV2; // upcast
// Comparator class name is stored in the trailer in version 2.
comparator = trailer.createComparator();
dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
trailer.getNumDataIndexLevels(), this);
metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
Bytes.BYTES_RAWCOMPARATOR, 1);
// Parse load-on-open data. NOTE: the blocks below must be consumed from
// blockIter in exactly this order (data index, meta index, file info),
// matching the order the writer laid them out.
HFileBlock.BlockIterator blockIter = fsBlockReaderV2.blockRange(
trailer.getLoadOnOpenDataOffset(),
fileSize - trailer.getTrailerSize());
// Data index. We also read statistics about the block index written after
// the root level.
dataBlockIndexReader.readMultiLevelIndexRoot(
blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
trailer.getDataIndexCount());
// Meta index.
metaBlockIndexReader.readRootIndex(
blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
trailer.getMetaIndexCount());
// File info
fileInfo = new FileInfo();
fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
// Files written with memstore timestamps carry an extra varlong per KV;
// the KV format version flag in the file info tells us whether to decode it.
byte [] keyValueFormatVersion =
fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
includesMemstoreTS = keyValueFormatVersion != null &&
Bytes.toInt(keyValueFormatVersion) ==
HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE;
fsBlockReaderV2.setIncludesMemstoreTS(includesMemstoreTS);
// Read data block encoding algorithm name from file info.
dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo,
preferredEncodingInCache);
fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);
// Store all other load-on-open blocks for further consumption.
HFileBlock b;
while ((b = blockIter.nextBlock()) != null) {
loadOnOpenBlocks.add(b);
}
}