当前位置: 首页>>代码示例>>Java>>正文


Java FileInfo.get方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.io.hfile.HFile.FileInfo.get方法的典型用法代码示例。如果您正苦于以下问题:Java FileInfo.get方法的具体用法?Java FileInfo.get怎么用?Java FileInfo.get使用的例子?那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.io.hfile.HFile.FileInfo的用法示例。


在下文中一共展示了FileInfo.get方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createFromFileInfo

import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Builds the data block encoder described by this file's file-info map.
 *
 * <p>When the {@code DATA_BLOCK_ENCODING} key is absent or resolves to
 * {@link DataBlockEncoding#NONE}, the shared no-op encoder instance is
 * returned instead of allocating a new encoder.
 *
 * @param fileInfo the file-info map read from the HFile
 * @return the encoder matching the encoding recorded in {@code fileInfo}
 * @throws IOException if the recorded encoding name is not a known
 *           {@link DataBlockEncoding} value
 */
public static HFileDataBlockEncoder createFromFileInfo(
    FileInfo fileInfo) throws IOException {
  // Absent key means the file was written without block encoding.
  DataBlockEncoding encoding = DataBlockEncoding.NONE;
  byte[] encodingBytes = fileInfo.get(DATA_BLOCK_ENCODING);
  if (encodingBytes != null) {
    String encodingName = Bytes.toString(encodingBytes);
    try {
      encoding = DataBlockEncoding.valueOf(encodingName);
    } catch (IllegalArgumentException ex) {
      throw new IOException("Invalid data block encoding type in file info: "
        + encodingName, ex);
    }
  }
  return encoding == DataBlockEncoding.NONE
      ? NoOpDataBlockEncoder.INSTANCE
      : new HFileDataBlockEncoderImpl(encoding);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:20,代码来源:HFileDataBlockEncoderImpl.java

示例2: createFromFileInfo

import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Derives the on-disk / in-cache encoder pair from the file-info map and
 * the column family's preferred in-cache encoding.
 *
 * @param fileInfo the file-info map read from the HFile
 * @param preferredEncodingInCache the encoding the column family would
 *          like to use in the block cache; may be null
 * @return an encoder combining the on-disk encoding recorded in the file
 *         with the in-cache encoding chosen as described in the body
 * @throws IOException if the recorded encoding name is not a known
 *           {@link DataBlockEncoding} value
 */
public static HFileDataBlockEncoder createFromFileInfo(
    FileInfo fileInfo, DataBlockEncoding preferredEncodingInCache)
    throws IOException {

  byte[] encodingBytes = fileInfo.get(DATA_BLOCK_ENCODING);
  boolean hasPreferredCacheEncoding =
      preferredEncodingInCache != null
          && preferredEncodingInCache != DataBlockEncoding.NONE;

  // Nothing recorded on disk and nothing requested for the cache:
  // the shared no-op encoder suffices.
  if (encodingBytes == null && !hasPreferredCacheEncoding) {
    return NoOpDataBlockEncoder.INSTANCE;
  }

  DataBlockEncoding onDisk = DataBlockEncoding.NONE;
  if (encodingBytes != null) {
    String encodingName = Bytes.toString(encodingBytes);
    try {
      onDisk = DataBlockEncoding.valueOf(encodingName);
    } catch (IllegalArgumentException ex) {
      throw new IOException("Invalid data block encoding type in file info: "
          + encodingName, ex);
    }
  }

  // NONE on disk is the "in-cache-only" (or fully unencoded) scenario, so
  // the column family's preferred cache encoding wins. Otherwise blocks
  // stay cached exactly as encoded on disk: when the CF encoding changes,
  // old files keep their encoding everywhere while new files pick up the
  // new one.
  DataBlockEncoding inCache =
      onDisk == DataBlockEncoding.NONE ? preferredEncodingInCache : onDisk;
  return new HFileDataBlockEncoderImpl(onDisk, inCache);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:41,代码来源:HFileDataBlockEncoderImpl.java

示例3: createFromFileInfo

import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Builds the on-disk / in-cache encoder pair for an HFile from its
 * file-info map and the column family's preferred in-cache encoding.
 *
 * @param fileInfo the file-info map read from the HFile
 * @param preferredEncodingInCache the encoding the column family would
 *          like to use in the block cache
 * @return the matching encoder, or the shared no-op encoder when the file
 *         carries no encoding entry at all
 * @throws IOException if the recorded encoding name is not a known
 *           {@link DataBlockEncoding} value
 */
public static HFileDataBlockEncoder createFromFileInfo(
    FileInfo fileInfo, DataBlockEncoding preferredEncodingInCache)
    throws IOException {
  byte[] encodingBytes = fileInfo.get(DATA_BLOCK_ENCODING);
  // No entry at all: file predates block encoding, use the no-op encoder.
  if (encodingBytes == null) {
    return NoOpDataBlockEncoder.INSTANCE;
  }

  String encodingName = Bytes.toString(encodingBytes);
  DataBlockEncoding onDisk;
  try {
    onDisk = DataBlockEncoding.valueOf(encodingName);
  } catch (IllegalArgumentException ex) {
    throw new IOException("Invalid data block encoding type in file info: " +
        encodingName, ex);
  }

  // NONE on disk is the "in-cache-only" (or fully unencoded) scenario, so
  // the preferred cache encoding applies. Otherwise cached blocks keep the
  // on-disk encoding: old files retain their encoding after a CF encoding
  // switch, new files are written with the new one.
  DataBlockEncoding inCache =
      onDisk == DataBlockEncoding.NONE ? preferredEncodingInCache : onDisk;
  return new HFileDataBlockEncoderImpl(onDisk, inCache);
}
 
开发者ID:daidong,项目名称:DominoHBase,代码行数:33,代码来源:HFileDataBlockEncoderImpl.java

示例4: HFileReaderV2

import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Opens a HFile. You must load the index before you can use it by calling
 * {@link #loadFileInfo()}.
 *
 * @param path Path to HFile.
 * @param trailer File trailer.
 * @param fsdis input stream. Caller is responsible for closing the passed
 *          stream.
 * @param fsdisNoFsChecksum secondary input stream; by its name it skips
 *          filesystem-level checksum verification — TODO confirm against
 *          HFileSystem usage.
 * @param size Length of the stream.
 * @param closeIStream Whether to close the stream.
 * @param cacheConf Cache configuration.
 * @param preferredEncodingInCache the encoding to use in cache in case we
 *          have a choice. If the file is already encoded on disk, we will
 *          still use its on-disk encoding in cache.
 * @param hfs the HFileSystem the file resides on.
 * @throws IOException if the trailer version is unsupported or the
 *           load-on-open section cannot be read.
 */
public HFileReaderV2(Path path, FixedFileTrailer trailer,
    final FSDataInputStream fsdis, final FSDataInputStream fsdisNoFsChecksum,
    final long size,
    final boolean closeIStream, final CacheConfig cacheConf,
    DataBlockEncoding preferredEncodingInCache, final HFileSystem hfs)
    throws IOException {
  super(path, trailer, fsdis, fsdisNoFsChecksum, size, 
        closeIStream, cacheConf, hfs);
  // This reader only understands major version 2; minor version is
  // range-checked separately below.
  trailer.expectMajorVersion(2);
  validateMinorVersion(path, trailer.getMinorVersion());
  HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis,
      fsdisNoFsChecksum,
      compressAlgo, fileSize, trailer.getMinorVersion(), hfs, path);
  this.fsBlockReader = fsBlockReaderV2; // upcast

  // Comparator class name is stored in the trailer in version 2.
  comparator = trailer.createComparator();
  dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
      trailer.getNumDataIndexLevels(), this);
  metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
      Bytes.BYTES_RAWCOMPARATOR, 1);

  // Parse load-on-open data.

  // The load-on-open section sits between the offset recorded in the
  // trailer and the trailer itself.
  HFileBlock.BlockIterator blockIter = fsBlockReaderV2.blockRange(
      trailer.getLoadOnOpenDataOffset(),
      fileSize - trailer.getTrailerSize());

  // Data index. We also read statistics about the block index written after
  // the root level.
  dataBlockIndexReader.readMultiLevelIndexRoot(
      blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
      trailer.getDataIndexCount());

  // Meta index.
  metaBlockIndexReader.readRootIndex(
      blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
      trailer.getMetaIndexCount());

  // File info
  fileInfo = new FileInfo();
  fileInfo.readFields(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
  lastKey = fileInfo.get(FileInfo.LASTKEY);
  avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
  avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
  // Files written with memstore timestamps advertise it via this key; old
  // files lack the key entirely, hence the null check.
  byte [] keyValueFormatVersion =
      fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
  includesMemstoreTS = keyValueFormatVersion != null &&
      Bytes.toInt(keyValueFormatVersion) ==
          HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE;
  fsBlockReaderV2.setIncludesMemstoreTS(includesMemstoreTS);
  if (includesMemstoreTS) {
    // Only decode memstore timestamps when the writer recorded a positive
    // maximum memstore TS for this file.
    decodeMemstoreTS = Bytes.toLong(fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY)) > 0;
  }

  // Read data block encoding algorithm name from file info.
  dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo,
      preferredEncodingInCache);
  fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);

  // Store all other load-on-open blocks for further consumption.
  HFileBlock b;
  while ((b = blockIter.nextBlock()) != null) {
    loadOnOpenBlocks.add(b);
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:82,代码来源:HFileReaderV2.java

示例5: HFileReaderV2

import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Opens a HFile. You must load the index before you can use it by calling
 * {@link #loadFileInfo()}.
 *
 * @param path Path to HFile.
 * @param trailer File trailer.
 * @param fsdis input stream. Caller is responsible for closing the passed
 *          stream.
 * @param size Length of the stream.
 * @param closeIStream Whether to close the stream.
 * @param cacheConf Cache configuration.
 * @throws IOException if the trailer version is not 2 or the load-on-open
 *           section cannot be read.
 */
public HFileReaderV2(Path path, FixedFileTrailer trailer,
    final FSDataInputStream fsdis, final long size,
    final boolean closeIStream, final CacheConfig cacheConf)
throws IOException {
  super(path, trailer, fsdis, size, closeIStream, cacheConf);

  // This reader only understands version 2 files.
  trailer.expectVersion(2);
  fsBlockReader = new HFileBlock.FSReaderV2(fsdis, compressAlgo,
      fileSize);

  // Comparator class name is stored in the trailer in version 2.
  comparator = trailer.createComparator();
  dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
      trailer.getNumDataIndexLevels(), this);
  metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
      Bytes.BYTES_RAWCOMPARATOR, 1);

  // Parse load-on-open data.

  // The load-on-open section sits between the offset recorded in the
  // trailer and the trailer itself.
  HFileBlock.BlockIterator blockIter = fsBlockReader.blockRange(
      trailer.getLoadOnOpenDataOffset(),
      fileSize - trailer.getTrailerSize());

  // Data index. We also read statistics about the block index written after
  // the root level.
  dataBlockIndexReader.readMultiLevelIndexRoot(
      blockIter.nextBlockAsStream(BlockType.ROOT_INDEX),
      trailer.getDataIndexCount());

  // Meta index.
  metaBlockIndexReader.readRootIndex(
      blockIter.nextBlockAsStream(BlockType.ROOT_INDEX),
      trailer.getMetaIndexCount());

  // File info
  fileInfo = new FileInfo();
  fileInfo.readFields(blockIter.nextBlockAsStream(BlockType.FILE_INFO));
  lastKey = fileInfo.get(FileInfo.LASTKEY);
  avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
  avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
  // Old files have no KEY_VALUE_VERSION entry, hence the null check.
  byte [] keyValueFormatVersion = fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
  includesMemstoreTS = (keyValueFormatVersion != null &&
      Bytes.toInt(keyValueFormatVersion) == HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE);

  // Store all other load-on-open blocks for further consumption.
  HFileBlock b;
  while ((b = blockIter.nextBlock()) != null) {
    loadOnOpenBlocks.add(b);
  }
}
 
开发者ID:lifeng5042,项目名称:RStore,代码行数:64,代码来源:HFileReaderV2.java

示例6: HFileReaderV2

import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Opens a HFile. You must load the index before you can use it by calling
 * {@link #loadFileInfo()}.
 *
 * @param path Path to HFile.
 * @param trailer File trailer.
 * @param fsdis input stream.
 * @param size Length of the stream.
 * @param cacheConf Cache configuration.
 * @param hfs the HFileSystem the file resides on.
 * @throws IOException if the trailer version is unsupported or the
 *           load-on-open section cannot be read.
 */
public HFileReaderV2(Path path, FixedFileTrailer trailer,
    final FSDataInputStreamWrapper fsdis, final long size, final CacheConfig cacheConf,
    final HFileSystem hfs) throws IOException {
  super(path, trailer, size, cacheConf, hfs);
  // This reader only understands major version 2; minor version is
  // range-checked separately below.
  trailer.expectMajorVersion(2);
  validateMinorVersion(path, trailer.getMinorVersion());
  HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis,
      compressAlgo, fileSize, trailer.getMinorVersion(), hfs, path);
  this.fsBlockReader = fsBlockReaderV2; // upcast

  // Comparator class name is stored in the trailer in version 2.
  comparator = trailer.createComparator();
  dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
      trailer.getNumDataIndexLevels(), this);
  metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
      KeyValue.RAW_COMPARATOR, 1);

  // Parse load-on-open data.

  // The load-on-open section sits between the offset recorded in the
  // trailer and the trailer itself.
  HFileBlock.BlockIterator blockIter = fsBlockReaderV2.blockRange(
      trailer.getLoadOnOpenDataOffset(),
      fileSize - trailer.getTrailerSize());

  // Data index. We also read statistics about the block index written after
  // the root level.
  dataBlockIndexReader.readMultiLevelIndexRoot(
      blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
      trailer.getDataIndexCount());

  // Meta index.
  metaBlockIndexReader.readRootIndex(
      blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
      trailer.getMetaIndexCount());

  // File info
  fileInfo = new FileInfo();
  fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
  lastKey = fileInfo.get(FileInfo.LASTKEY);
  avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
  avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
  // Old files have no KEY_VALUE_VERSION entry, hence the null check.
  byte [] keyValueFormatVersion =
      fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
  includesMemstoreTS = keyValueFormatVersion != null &&
      Bytes.toInt(keyValueFormatVersion) ==
          HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE;
  fsBlockReaderV2.setIncludesMemstoreTS(includesMemstoreTS);
  if (includesMemstoreTS) {
    // Only decode memstore timestamps when the writer recorded a positive
    // maximum memstore TS for this file.
    decodeMemstoreTS = Bytes.toLong(fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY)) > 0;
  }

  // Read data block encoding algorithm name from file info.
  dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
  fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);

  // Store all other load-on-open blocks for further consumption.
  HFileBlock b;
  while ((b = blockIter.nextBlock()) != null) {
    loadOnOpenBlocks.add(b);
  }
}
 
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:74,代码来源:HFileReaderV2.java

示例7: HFileReaderV2

import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; //导入方法依赖的package包/类
/**
 * Opens a HFile. You must load the index before you can use it by calling
 * {@link #loadFileInfo()}.
 *
 * @param path Path to HFile.
 * @param trailer File trailer.
 * @param fsdis input stream. Caller is responsible for closing the passed
 *          stream.
 * @param fsdisNoFsChecksum secondary input stream; by its name it skips
 *          filesystem-level checksum verification — TODO confirm against
 *          HFileSystem usage.
 * @param size Length of the stream.
 * @param closeIStream Whether to close the stream.
 * @param cacheConf Cache configuration.
 * @param preferredEncodingInCache the encoding to use in cache in case we
 *          have a choice. If the file is already encoded on disk, we will
 *          still use its on-disk encoding in cache.
 * @param hfs the HFileSystem the file resides on.
 * @throws IOException if the trailer version is unsupported or the
 *           load-on-open section cannot be read.
 */
public HFileReaderV2(Path path, FixedFileTrailer trailer,
    final FSDataInputStream fsdis, final FSDataInputStream fsdisNoFsChecksum,
    final long size,
    final boolean closeIStream, final CacheConfig cacheConf,
    DataBlockEncoding preferredEncodingInCache, final HFileSystem hfs)
    throws IOException {
  super(path, trailer, fsdis, fsdisNoFsChecksum, size, 
        closeIStream, cacheConf, hfs);
  // This reader only understands major version 2; minor version is
  // range-checked separately below.
  trailer.expectMajorVersion(2);
  validateMinorVersion(path, trailer.getMinorVersion());
  HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis,
      fsdisNoFsChecksum,
      compressAlgo, fileSize, trailer.getMinorVersion(), hfs, path);
  this.fsBlockReader = fsBlockReaderV2; // upcast

  // Comparator class name is stored in the trailer in version 2.
  comparator = trailer.createComparator();
  dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
      trailer.getNumDataIndexLevels(), this);
  metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
      Bytes.BYTES_RAWCOMPARATOR, 1);

  // Parse load-on-open data.

  // The load-on-open section sits between the offset recorded in the
  // trailer and the trailer itself.
  HFileBlock.BlockIterator blockIter = fsBlockReaderV2.blockRange(
      trailer.getLoadOnOpenDataOffset(),
      fileSize - trailer.getTrailerSize());

  // Data index. We also read statistics about the block index written after
  // the root level.
  dataBlockIndexReader.readMultiLevelIndexRoot(
      blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
      trailer.getDataIndexCount());

  // Meta index.
  metaBlockIndexReader.readRootIndex(
      blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
      trailer.getMetaIndexCount());

  // File info
  fileInfo = new FileInfo();
  fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
  lastKey = fileInfo.get(FileInfo.LASTKEY);
  avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
  avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
  // Old files have no KEY_VALUE_VERSION entry, hence the null check.
  byte [] keyValueFormatVersion =
      fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
  includesMemstoreTS = keyValueFormatVersion != null &&
      Bytes.toInt(keyValueFormatVersion) ==
          HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE;
  fsBlockReaderV2.setIncludesMemstoreTS(includesMemstoreTS);

  // Read data block encoding algorithm name from file info.
  dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo,
      preferredEncodingInCache);
  fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);

  // Store all other load-on-open blocks for further consumption.
  HFileBlock b;
  while ((b = blockIter.nextBlock()) != null) {
    loadOnOpenBlocks.add(b);
  }
}
 
开发者ID:daidong,项目名称:DominoHBase,代码行数:79,代码来源:HFileReaderV2.java


注:本文中的org.apache.hadoop.hbase.io.hfile.HFile.FileInfo.get方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。