

Java HFileBlock Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFileBlock. If you are wondering what the HFileBlock class does, how to use it, or want concrete examples of it in action, the curated code examples below may help.


The HFileBlock class belongs to the org.apache.hadoop.hbase.io.hfile package. Fifteen code examples of the class are shown below, sorted by popularity.

Example 1: diskToCacheFormat

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
@Override
public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
  if (block.getBlockType() == BlockType.DATA) {
    if (!useEncodedScanner(isCompaction)) {
      // Unencoded block, and we don't want to encode in cache.
      return block;
    }
    // Encode the unencoded block with the in-cache encoding.
    return encodeDataBlock(block, inCache, block.doesIncludeMemstoreTS());
  }

  if (block.getBlockType() == BlockType.ENCODED_DATA) {
    if (block.getDataBlockEncodingId() == onDisk.getId()) {
      // The block is already in the desired in-cache encoding.
      return block;
    }
    // We don't want to re-encode a block in a different encoding. The HFile
    // reader should have been instantiated in such a way that we would not
    // have to do this.
    throw new AssertionError("Expected on-disk data block encoding " +
        onDisk + ", got " + block.getDataBlockEncoding());
  }
  return block;
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 25, Source: HFileDataBlockEncoderImpl.java
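
The method above is a three-way dispatch on block type. A minimal, self-contained model of that decision (BlockKind, the flags, and the returned strings are illustrative stand-ins, not HBase types):

// Simplified model of the disk-to-cache decision in diskToCacheFormat.
enum BlockKind { DATA, ENCODED_DATA, OTHER }

static String diskToCacheDecision(BlockKind kind, boolean useEncodedScanner,
    boolean encodingMatchesOnDisk) {
  if (kind == BlockKind.DATA) {
    // Unencoded data block: encode for the cache only if encoded scanners are in use.
    return useEncodedScanner ? "encode with in-cache encoding" : "cache as-is";
  }
  if (kind == BlockKind.ENCODED_DATA) {
    if (encodingMatchesOnDisk) {
      return "cache as-is"; // already in the desired encoding
    }
    // Re-encoding to a different encoding is never expected here.
    throw new AssertionError("unexpected on-disk data block encoding");
  }
  return "cache as-is"; // index/bloom/meta blocks pass through unchanged
}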

Example 2: encodeDataBlock

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
private HFileBlock encodeDataBlock(HFileBlock block,
    DataBlockEncoding algo, boolean includesMemstoreTS) {
  ByteBuffer compressedBuffer = encodeBufferToHFileBlockBuffer(
      block.getBufferWithoutHeader(), algo, includesMemstoreTS,
      block.getDummyHeaderForVersion());
  int sizeWithoutHeader = compressedBuffer.limit() - block.headerSize();
  HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
      block.getOnDiskSizeWithoutHeader(),
      sizeWithoutHeader, block.getPrevBlockOffset(),
      compressedBuffer, HFileBlock.FILL_HEADER, block.getOffset(),
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  block.passSchemaMetricsTo(encodedBlock);
  return encodedBlock;
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 17, Source: HFileDataBlockEncoderImpl.java

Example 3: setHFileDeserializer

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
@SuppressWarnings("unchecked")
public static void setHFileDeserializer() {
  Field field = getProtectedField(HFileBlock.class, "blockDeserializer");
  if (field == null) {
    LOG.error("Could not get access to HFileBlock.blockDeserializer");
    return;
  }

  try {
    CacheableDeserializer<Cacheable> serde =
        (CacheableDeserializer<Cacheable>) field.get(null);
    if (serde != null) {
      deserializer.set(serde);
    } else {
      LOG.warn("HFileBlock.blockDeserializer is null");
    }
  } catch (Exception e) {
    LOG.warn("Unable to read HFileBlock.blockDeserializer", e);
  }
}
 
Author: VladRodionov, Project: bigbase, Lines: 26, Source: CacheableSerializer.java
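
The helper getProtectedField is referenced above but not shown. A minimal sketch of what it presumably looks like, using plain java.lang.reflect (the name and signature are inferred from the call site, not taken from the project):

import java.lang.reflect.Field;

// Hypothetical reconstruction of the getProtectedField helper used above:
// look up a declared (possibly private/protected) field and make it accessible.
static Field getProtectedField(Class<?> klass, String fieldName) {
  try {
    Field field = klass.getDeclaredField(fieldName);
    field.setAccessible(true); // may be refused under JPMS or a SecurityManager
    return field;
  } catch (NoSuchFieldException | SecurityException e) {
    return null; // the caller treats null as "not accessible"
  }
}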

Example 4: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset,
      keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf, bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Author: fengchen8086, Project: ditb, Lines: 41, Source: CompoundBloomFilter.java
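
As background for the ByteBloomFilter.contains call: a Bloom filter answers membership by checking hashCount bit positions derived from hashes of the key; if any bit is clear the key is definitely absent, otherwise it is possibly present. A self-contained sketch of that check (the hash here is a toy stand-in, not HBase's Hash implementation):

import java.util.BitSet;

// Illustrative Bloom-filter membership test over an in-memory bit set.
static boolean bloomContains(byte[] key, BitSet bits, int bitCount, int hashCount) {
  for (int i = 0; i < hashCount; i++) {
    int h = i;
    for (byte b : key) {
      h = 31 * h + b; // toy hash mixing, seeded per round by i
    }
    if (!bits.get(Math.floorMod(h, bitCount))) {
      return false; // a cleared bit proves the key was never added
    }
  }
  return true; // all bits set: possibly present (false positives allowed)
}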

Example 5: verifyCodecs

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
/**
 * Verify if all data block encoders are working properly.
 *
 * @param scanner Scanner over the file that was compressed.
 * @param kvLimit Maximum number of KeyValues to process.
 * @return true if all data block encoders compressed/decompressed correctly.
 * @throws IOException thrown if scanner is invalid
 */
public boolean verifyCodecs(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  KeyValue currentKv;

  scanner.seek(KeyValue.LOWESTKEY);
  List<Iterator<Cell>> codecIterators =
      new ArrayList<Iterator<Cell>>();
  for(EncodedDataBlock codec : codecs) {
    codecIterators.add(codec.getIterator(HFileBlock.headerSize(useHBaseChecksum)));
  }

  int j = 0;
  while ((currentKv = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) {
    // Iterates through key/value pairs
    ++j;
    for (Iterator<Cell> it : codecIterators) {
      Cell c = it.next();
      KeyValue codecKv = KeyValueUtil.ensureKeyValue(c);
      if (codecKv == null || 0 != Bytes.compareTo(
          codecKv.getBuffer(), codecKv.getOffset(), codecKv.getLength(),
          currentKv.getBuffer(), currentKv.getOffset(),
          currentKv.getLength())) {
        if (codecKv == null) {
          LOG.error("There is a bug in codec " + it +
              " it returned null KeyValue,");
        } else {
          int prefix = 0;
          int limitLength = 2 * Bytes.SIZEOF_INT +
              Math.min(codecKv.getLength(), currentKv.getLength());
          while (prefix < limitLength &&
              codecKv.getBuffer()[prefix + codecKv.getOffset()] ==
              currentKv.getBuffer()[prefix + currentKv.getOffset()]) {
            prefix++;
          }

          LOG.error("There is bug in codec " + it.toString() +
              "\n on element " + j +
              "\n codecKv.getKeyLength() " + codecKv.getKeyLength() +
              "\n codecKv.getValueLength() " + codecKv.getValueLength() +
              "\n codecKv.getLength() " + codecKv.getLength() +
              "\n currentKv.getKeyLength() " + currentKv.getKeyLength() +
              "\n currentKv.getValueLength() " + currentKv.getValueLength() +
              "\n codecKv.getLength() " + currentKv.getLength() +
              "\n currentKV rowLength " + currentKv.getRowLength() +
              " familyName " + currentKv.getFamilyLength() +
              " qualifier " + currentKv.getQualifierLength() +
              "\n prefix " + prefix +
              "\n codecKv   '" + Bytes.toStringBinary(codecKv.getBuffer(),
                  codecKv.getOffset(), prefix) + "' diff '" +
                  Bytes.toStringBinary(codecKv.getBuffer(),
                      codecKv.getOffset() + prefix, codecKv.getLength() -
                      prefix) + "'" +
              "\n currentKv '" + Bytes.toStringBinary(
                 currentKv.getBuffer(),
                 currentKv.getOffset(), prefix) + "' diff '" +
                 Bytes.toStringBinary(currentKv.getBuffer(),
                     currentKv.getOffset() + prefix, currentKv.getLength() -
                     prefix) + "'"
              );
        }
        return false;
      }
    }
  }

  LOG.info("Verification was successful!");

  return true;
}
 
Author: fengchen8086, Project: ditb, Lines: 78, Source: DataBlockEncodingTool.java
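
The diff output above depends on finding how many leading bytes two serialized KeyValues share. The prefix loop in isolation, as a reusable helper (the name is mine, not from the source):

// Length of the common prefix of a[aOff..aOff+len) and b[bOff..bOff+len),
// mirroring the prefix computation in verifyCodecs above.
static int commonPrefixLength(byte[] a, int aOff, byte[] b, int bOff, int len) {
  int prefix = 0;
  while (prefix < len && a[aOff + prefix] == b[bOff + prefix]) {
    prefix++;
  }
  return prefix;
}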

Example 6: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 40, Source: CompoundBloomFilter.java

Example 7: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Author: tenggyut, Project: HIndex, Lines: 40, Source: CompoundBloomFilter.java

Example 8: writeToCache

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
public BucketEntry writeToCache(final IOEngine ioEngine,
    final BucketAllocator bucketAllocator,
    final UniqueIndexMap<Integer> deserialiserMap,
    final LongAdder realCacheSize) throws CacheFullException, IOException,
    BucketAllocatorException {
  int len = data.getSerializedLength();
  // This cacheable thing can't be serialized
  if (len == 0) return null;
  long offset = bucketAllocator.allocateBlock(len);
  BucketEntry bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory);
  bucketEntry.setDeserialiserReference(data.getDeserializer(), deserialiserMap);
  try {
    if (data instanceof HFileBlock) {
      // If an instance of HFileBlock, save on some allocations.
      HFileBlock block = (HFileBlock)data;
      ByteBuff sliceBuf = block.getBufferReadOnly();
      ByteBuffer metadata = block.getMetaData();
      if (LOG.isTraceEnabled()) {
        LOG.trace("Write offset=" + offset + ", len=" + len);
      }
      ioEngine.write(sliceBuf, offset);
      ioEngine.write(metadata, offset + len - metadata.limit());
    } else {
      ByteBuffer bb = ByteBuffer.allocate(len);
      data.serialize(bb);
      ioEngine.write(bb, offset);
    }
  } catch (IOException ioe) {
    // free it in bucket allocator
    bucketAllocator.freeBlock(offset);
    throw ioe;
  }

  realCacheSize.add(len);
  return bucketEntry;
}
 
Author: apache, Project: hbase, Lines: 37, Source: BucketCache.java
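
The IOEngine used above abstracts where the cached bytes live (heap, offheap, file, mmap). A minimal file-backed sketch of the same write pattern with plain NIO, for illustration only (this is not HBase's FileIOEngine):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

// Toy engine: write a buffer at an absolute offset handed out by an
// allocator, the way writeToCache uses bucketAllocator.allocateBlock().
final class FileBackedEngine {
  private final FileChannel channel;

  FileBackedEngine(Path cacheFile) throws IOException {
    this.channel = FileChannel.open(cacheFile,
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
  }

  void write(ByteBuffer src, long offset) throws IOException {
    while (src.hasRemaining()) {
      offset += channel.write(src, offset); // positional write; no seek needed
    }
  }
}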

Example 9: readStoreFile

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
  sf.initReader();
  HFile.Reader reader = sf.getReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, -1, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
 
Author: apache, Project: hbase, Lines: 36, Source: TestCacheOnWriteInSchema.java
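
The loop above walks the file by reading a block at `offset` and then advancing by the block's on-disk size. The same pattern applied to a simple file of [int length][payload] records, as a standalone illustration:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

// Count length-prefixed records by reading each header at `offset` and
// advancing past header + payload, as readStoreFile does with HFile blocks.
static int countRecords(Path file) throws IOException {
  try (FileChannel ch = FileChannel.open(file, StandardOpenOption.READ)) {
    long offset = 0;
    int count = 0;
    ByteBuffer lenBuf = ByteBuffer.allocate(Integer.BYTES);
    while (offset < ch.size()) {
      lenBuf.clear();
      if (ch.read(lenBuf, offset) < Integer.BYTES) {
        break; // truncated tail; stop scanning
      }
      lenBuf.flip();
      offset += Integer.BYTES + lenBuf.getInt(); // skip header + payload
      count++;
    }
    return count;
  }
}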

Example 10: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + HFileBlock.HEADER_SIZE,
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Author: lifeng5042, Project: RStore, Lines: 39, Source: CompoundBloomFilter.java

Example 11: contains

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset,
      keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Author: shenli-uiuc, Project: PyroDB, Lines: 41, Source: CompoundBloomFilter.java

Example 12: getFirstKeyInBlock

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
@Override
protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
  ByteBuffer buffer = curBlock.getBufferWithoutHeader();
  buffer.rewind();
  byte pNum = buffer.get();
  //LOG.info("Shen Li: PFileScanner.getFirstKeyInBlock called, pNum is " + pNum);
  // Skip the (pNum + 1) pointers stored ahead of the key/value lengths.
  buffer.position(buffer.position() +
      (pNum + 1) * PKeyValue.POINTER_SIZE);
  int klen = buffer.getInt();
  buffer.getInt(); // skip the value length
  ByteBuffer keyBuff = buffer.slice();
  keyBuff.limit(klen);
  keyBuff.rewind();
  return keyBuff;
}
 
Author: shenli-uiuc, Project: PyroDB, Lines: 16, Source: PFileReader.java
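
The position/slice/limit/rewind sequence above is the standard way to expose a sub-range of a ByteBuffer without copying. The pattern in isolation:

import java.nio.ByteBuffer;

// Zero-copy view of `length` bytes starting at `offset` of buf,
// built the same way getFirstKeyInBlock builds its key buffer.
static ByteBuffer viewOf(ByteBuffer buf, int offset, int length) {
  ByteBuffer dup = buf.duplicate(); // leave the caller's position untouched
  dup.position(offset);
  ByteBuffer view = dup.slice();    // view now starts at `offset`
  view.limit(length);               // and ends after `length` bytes
  return view;
}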

Example 13: EncodedDataBlock

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
/**
 * Create a buffer which will be encoded using dataBlockEncoder.
 * @param dataBlockEncoder encoder used to encode the data block
 * @param includesMemstoreTS whether the raw KVs include a memstore timestamp
 * @param encoding encoding type used
 * @param rawKVs raw KeyValue bytes to encode
 */
public EncodedDataBlock(DataBlockEncoder dataBlockEncoder,
    boolean includesMemstoreTS, DataBlockEncoding encoding, byte[] rawKVs) {
  Preconditions.checkNotNull(encoding,
      "Cannot create encoded data block with null encoding");
  this.dataBlockEncoder = dataBlockEncoder;
  encodingCtx =
      dataBlockEncoder.newDataBlockEncodingContext(Compression.Algorithm.NONE,
          encoding, HFileBlock.DUMMY_HEADER);
  this.rawKVs = rawKVs;
}
 
Author: daidong, Project: DominoHBase, Lines: 17, Source: EncodedDataBlock.java

Example 14: getEncodingContext

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
private HFileBlockEncodingContext getEncodingContext(
    Compression.Algorithm algo, DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(algo, encoding,
        HFileBlock.DUMMY_HEADER);
  } else {
    return new HFileBlockDefaultEncodingContext(algo, encoding, HFileBlock.DUMMY_HEADER);
  }
}
 
Author: daidong, Project: DominoHBase, Lines: 11, Source: TestDataBlockEncoders.java

Example 15: addStoreFileCutpoints

import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the required package/class
private static int addStoreFileCutpoints(List<byte[]> cutpoints, HFile.Reader fileReader,
        long storeFileInBytes, int carry, Pair<byte[], byte[]> range,
        int splitBlockSize) throws IOException {
    HFileBlockIndex.BlockIndexReader indexReader = fileReader.getDataBlockIndexReader();
    int size = indexReader.getRootBlockCount();
    int levels = fileReader.getTrailer().getNumDataIndexLevels();
    if (levels == 1) {
        // Single-level index: sample root index keys at roughly splitBlockSize intervals.
        int incrementalSize =
                (int) (size > 0 ? storeFileInBytes / (float) size : storeFileInBytes);
        int sizeCounter = 0;
        for (int i = 0; i < size; ++i) {
            if (sizeCounter >= splitBlockSize) {
                sizeCounter = 0;
                KeyValue tentative = KeyValue.createKeyValueFromKey(indexReader.getRootBlockKey(i));
                if (CellUtils.isKeyValueInRange(tentative, range)) {
                    cutpoints.add(tentative.getRow());
                }
            }
            sizeCounter += incrementalSize;
        }
        return sizeCounter;
    } else {
        // Multi-level index: descend into each child index block.
        for (int i = 0; i < size; ++i) {
            HFileBlock block = fileReader.readBlock(
                    indexReader.getRootBlockOffset(i),
                    indexReader.getRootBlockDataSize(i),
                    true, true, false, true,
                    levels == 2 ? BlockType.LEAF_INDEX : BlockType.INTERMEDIATE_INDEX,
                    fileReader.getDataBlockEncoding());
            carry = addIndexCutpoints(fileReader, block.getBufferWithoutHeader(), levels - 1,
                    cutpoints, storeFileInBytes / size, carry, range, splitBlockSize);
        }
        return carry;
    }
}
 
Author: splicemachine, Project: spliceengine, Lines: 32, Source: HRegionUtil.java
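
The single-level branch above is an even-spacing sampler: each root index entry is credited with roughly storeFileInBytes/size bytes, and a cutpoint is taken whenever the running total crosses splitBlockSize. That accumulation logic in isolation (names are illustrative):

import java.util.ArrayList;
import java.util.List;

// Pick one index each time the running byte counter crosses the threshold,
// assuming each of `count` entries covers about bytesPerEntry bytes.
static List<Integer> sampleEvery(int count, long bytesPerEntry, long threshold) {
  List<Integer> picks = new ArrayList<>();
  long counter = 0;
  for (int i = 0; i < count; i++) {
    if (counter >= threshold) {
      counter = 0;
      picks.add(i); // in addStoreFileCutpoints this is where a cutpoint row is added
    }
    counter += bytesPerEntry;
  }
  return picks;
}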


Note: The org.apache.hadoop.hbase.io.hfile.HFileBlock class examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright in the source code belongs to the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.