

Java HFileBlock.getBufferReadOnly Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFileBlock.getBufferReadOnly. If you are wondering what HFileBlock.getBufferReadOnly does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.hfile.HFileBlock.


Six code examples of the HFileBlock.getBufferReadOnly method are shown below, ordered by popularity.
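Before the examples, a minimal sketch of what these call sites have in common may help. getBufferReadOnly() exposes the block's backing buffer without copying, and that buffer still begins with the block header, so consumers skip headerSize() bytes to reach the payload. The helper below is hypothetical (the BlockPayloadSketch and inspectPayload names are invented for illustration, and the ByteBuffer return type assumes the HBase 0.94/1.x-era API; in newer HBase the method returns a ByteBuff instead):

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;

public class BlockPayloadSketch {
  // Hypothetical helper: copies out the first n payload bytes of a block.
  static byte[] inspectPayload(HFileBlock block, int n) {
    ByteBuffer buf = block.getBufferReadOnly();    // shared read-only view; do not mutate
    int payloadOffset = block.headerSize();        // payload starts right after the header
    int payloadLen = block.getUncompressedSizeWithoutHeader();
    byte[] out = new byte[Math.min(n, payloadLen)];
    for (int i = 0; i < out.length; i++) {
      out[i] = buf.get(payloadOffset + i);         // absolute gets leave position/limit intact
    }
    return out;
  }
}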

Example 1: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class this method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset,
      keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf, bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 41, Source file: CompoundBloomFilter.java

Example 2: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class this method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 40, Source file: CompoundBloomFilter.java

Example 3: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class this method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Developer: tenggyut, Project: HIndex, Lines of code: 40, Source file: CompoundBloomFilter.java

Example 4: writeToCache

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class this method depends on
public BucketEntry writeToCache(final IOEngine ioEngine,
    final BucketAllocator bucketAllocator,
    final UniqueIndexMap<Integer> deserialiserMap,
    final LongAdder realCacheSize) throws CacheFullException, IOException,
    BucketAllocatorException {
  int len = data.getSerializedLength();
  // This cacheable thing can't be serialized
  if (len == 0) return null;
  long offset = bucketAllocator.allocateBlock(len);
  BucketEntry bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory);
  bucketEntry.setDeserialiserReference(data.getDeserializer(), deserialiserMap);
  try {
    if (data instanceof HFileBlock) {
      // If an instance of HFileBlock, save on some allocations.
      HFileBlock block = (HFileBlock)data;
      ByteBuff sliceBuf = block.getBufferReadOnly();
      ByteBuffer metadata = block.getMetaData();
      if (LOG.isTraceEnabled()) {
        LOG.trace("Write offset=" + offset + ", len=" + len);
      }
      ioEngine.write(sliceBuf, offset);
      ioEngine.write(metadata, offset + len - metadata.limit());
    } else {
      ByteBuffer bb = ByteBuffer.allocate(len);
      data.serialize(bb);
      ioEngine.write(bb, offset);
    }
  } catch (IOException ioe) {
    // free it in bucket allocator
    bucketAllocator.freeBlock(offset);
    throw ioe;
  }

  realCacheSize.add(len);
  return bucketEntry;
}
 
Developer: apache, Project: hbase, Lines of code: 37, Source file: BucketCache.java
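Note that Example 4 targets a newer Apache HBase line than the others: there getBufferReadOnly() returns an org.apache.hadoop.hbase.nio.ByteBuff rather than a java.nio.ByteBuffer, which lets BucketCache hand the block's bytes to the IOEngine without an intermediate copy. The contains() examples above and below are written against the older ByteBuffer-returning variant.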

Example 5: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class this method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + HFileBlock.HEADER_SIZE,
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Developer: lifeng5042, Project: RStore, Lines of code: 39, Source file: CompoundBloomFilter.java

Example 6: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class this method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset,
      keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Developer: shenli-uiuc, Project: PyroDB, Lines of code: 41, Source file: CompoundBloomFilter.java


Note: The org.apache.hadoop.hbase.io.hfile.HFileBlock.getBufferReadOnly method examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are governed by each project's License. Please do not reproduce without permission.