

Java BlockType Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.BlockType. If you are wondering what the BlockType class does, how to use it, or where to find real-world examples of it, the curated code samples below should help.


The BlockType class belongs to the org.apache.hadoop.hbase.io.hfile package. The sections below present 15 code examples of the BlockType class, sorted by popularity by default.
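By way of orientation, here is a minimal sketch of my own (not one of the collected examples below) that iterates over the BlockType constants and prints each one with its coarser BlockType.BlockCategory, assuming the getCategory() accessor exposed by the HBase versions these examples target. Several of the constants it prints (DATA, ENCODED_DATA, BLOOM_CHUNK, GENERAL_BLOOM_META, DELETE_FAMILY_BLOOM_META) appear in the examples that follow.

import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;

public class BlockTypeTour {
  public static void main(String[] args) {
    // BlockType is an enum: each constant identifies one kind of HFile block
    // and maps to a coarser BlockCategory (DATA, INDEX, BLOOM, META, ...)
    // that HBase uses, for example, when grouping cache metrics.
    for (BlockType bt : BlockType.values()) {
      BlockCategory category = bt.getCategory();
      System.out.println(bt + " -> " + category);
    }
  }
}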

Example 1: encodeKeyValues

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
/**
 * Copied from BufferedDataBlockEncoder. Almost certainly can be improved, but I'm not familiar
 * enough with the concept of the HFileBlockEncodingContext.
 */
@Override
public void encodeKeyValues(ByteBuffer in,
    HFileBlockEncodingContext blkEncodingCtx) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
  }

  HFileBlockDefaultEncodingContext encodingCtx
      = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding();
  DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
  internalEncodeKeyValues(dataOut, in, encodingCtx.getHFileContext().isIncludesMvcc(),
      encodingCtx.getHFileContext().isIncludesTags());

  // Do I need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 27, Source: PrefixTreeCodec.java

Example 2: encodeKeyValues

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
/**
 * Copied from BufferedDataBlockEncoder. Almost certainly can be improved, but I'm not familiar
 * enough with the concept of the HFileBlockEncodingContext.
 */
@Override
public void encodeKeyValues(ByteBuffer in, boolean includesMvccVersion,
    HFileBlockEncodingContext blkEncodingCtx) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
  }

  HFileBlockDefaultEncodingContext encodingCtx
      = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding();
  DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
  internalEncodeKeyValues(dataOut, in, includesMvccVersion);

  // Do I need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Author: cloud-software-foundation, Project: c5, Lines: 26, Source: PrefixTreeCodec.java

Example 3: encodeKeyValues

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void encodeKeyValues(ByteBuffer in,
    boolean includesMemstoreTS,
    HFileBlockEncodingContext blkEncodingCtx) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException (this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " +
        "encoding context.");
  }

  HFileBlockDefaultEncodingContext encodingCtx =
      (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding();
  DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
  internalEncodeKeyValues(dataOut, in, includesMemstoreTS);
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Author: cloud-software-foundation, Project: c5, Lines: 24, Source: BufferedDataBlockEncoder.java

Example 4: compressAfterEncoding

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
/**
 * Compresses the already-encoded block bytes if a compression algorithm is configured.
 *
 * @param uncompressedBytesWithHeader block bytes to compress, including the header
 * @param blockType the type of the block being finalized
 * @param headerBytes the serialized header written ahead of the compressed body
 * @throws IOException if writing to the compression stream fails
 */
protected void compressAfterEncoding(byte[] uncompressedBytesWithHeader,
    BlockType blockType, byte[] headerBytes) throws IOException {
  this.uncompressedBytesWithHeader = uncompressedBytesWithHeader;
  if (compressionAlgorithm != NONE) {
    compressedByteStream.reset();
    compressedByteStream.write(headerBytes);
    compressionStream.resetState();
    compressionStream.write(uncompressedBytesWithHeader,
        headerBytes.length, uncompressedBytesWithHeader.length
            - headerBytes.length);

    compressionStream.flush();
    compressionStream.finish();
    onDiskBytesWithHeader = compressedByteStream.toByteArray();
  } else {
    onDiskBytesWithHeader = uncompressedBytesWithHeader;
  }
  this.blockType = blockType;
}
 
Author: cloud-software-foundation, Project: c5, Lines: 26, Source: HFileBlockDefaultEncodingContext.java

Example 5: contains

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset,
      keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf, bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Author: fengchen8086, Project: ditb, Lines: 41, Source: CompoundBloomFilter.java

Example 6: setBloomFilterFaulty

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
private void setBloomFilterFaulty(BlockType blockType) {
  if (blockType == BlockType.GENERAL_BLOOM_META) {
    setGeneralBloomFilterFaulty();
  } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) {
    setDeleteFamilyBloomFilterFaulty();
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 8, Source: StoreFile.java

Example 7: endBlockEncoding

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
    byte[] uncompressedBytesWithHeader) throws IOException {
  PrefixTreeEncodingState state = (PrefixTreeEncodingState) encodingCtx.getEncodingState();
  PrefixTreeEncoder builder = state.builder;
  builder.flush();
  EncoderFactory.checkIn(builder);
  // Do I need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 15, Source: PrefixTreeCodec.java

Example 8: endBlockEncoding

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
    byte[] uncompressedBytesWithHeader) throws IOException {
  BufferedDataBlockEncodingState state = (BufferedDataBlockEncodingState) encodingCtx
      .getEncodingState();
  // Write the unencodedDataSizeWritten (with header size)
  Bytes.putInt(uncompressedBytesWithHeader,
      HConstants.HFILEBLOCK_HEADER_SIZE + DataBlockEncoding.ID_SIZE,
      state.unencodedDataSizeWritten);
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 16, Source: BufferedDataBlockEncoder.java

Example 9: contains

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 40, Source: CompoundBloomFilter.java

Example 10: testIncrements

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Test
public void testIncrements() {
  Random rand = new Random(23982737L);
  for (int i = 1; i <= 3; ++i) {
    final String tableName = "table" + i;
    for (int j = 1; j <= 3; ++j) {
      final String cfName = "cf" + j;
      SchemaMetrics sm = SchemaMetrics.getInstance(tableName, cfName);
      for (boolean isInBloom : BOOL_VALUES) {
        sm.updateBloomMetrics(isInBloom);
        checkMetrics();
      }

      for (BlockCategory blockCat : BlockType.BlockCategory.values()) {
        if (blockCat == BlockCategory.ALL_CATEGORIES) {
          continue;
        }

        for (boolean isCompaction : BOOL_VALUES) {
          sm.updateOnCacheHit(blockCat, isCompaction);
          checkMetrics();
          sm.updateOnCacheMiss(blockCat, isCompaction, rand.nextInt());
          checkMetrics();
        }

        for (boolean isEviction : BOOL_VALUES) {
          sm.updateOnCachePutOrEvict(blockCat, (isEviction ? -1 : 1)
              * rand.nextInt(1024 * 1024), isEviction);
        }
      }
    }
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 34, Source: TestSchemaMetrics.java

Example 11: setBloomFilterFaulty

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
private void setBloomFilterFaulty(BlockType blockType) {
    if (blockType == BlockType.GENERAL_BLOOM_META) {
        setGeneralBloomFilterFaulty();
    } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) {
        setDeleteFamilyBloomFilterFaulty();
    }
}
 
Author: grokcoder, Project: pbase, Lines: 8, Source: StoreFile.java

Example 12: contains

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
 
Author: tenggyut, Project: HIndex, Lines: 40, Source: CompoundBloomFilter.java

Example 13: encodeKeyValues

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void encodeKeyValues(ByteBuffer in,
    HFileBlockEncodingContext blkEncodingCtx) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException (this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " +
        "encoding context.");
  }

  HFileBlockDefaultEncodingContext encodingCtx =
      (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding();
  DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
  if (encodingCtx.getHFileContext().isIncludesTags()
      && encodingCtx.getHFileContext().isCompressTags()) {
    if (encodingCtx.getTagCompressionContext() != null) {
      // Reuse the existing TagCompressionContext; creating it anew for every
      // block encoding would add needless overhead.
      encodingCtx.getTagCompressionContext().clear();
    } else {
      try {
        TagCompressionContext tagCompressionContext = new TagCompressionContext(
            LRUDictionary.class, Byte.MAX_VALUE);
        encodingCtx.setTagCompressionContext(tagCompressionContext);
      } catch (Exception e) {
        throw new IOException("Failed to initialize TagCompressionContext", e);
      }
    }
  }
  internalEncodeKeyValues(dataOut, in, encodingCtx);
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 37, Source: BufferedDataBlockEncoder.java

Example 14: postEncoding

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void postEncoding(BlockType blockType)
    throws IOException {
  dataOut.flush();
  compressAfterEncodingWithBlockType(encodedStream.toByteArray(), blockType);
  this.blockType = blockType;
}
 
Author: tenggyut, Project: HIndex, Lines: 8, Source: HFileBlockDefaultEncodingContext.java

Example 15: postEncoding

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
protected void postEncoding(HFileBlockEncodingContext encodingCtx)
    throws IOException {
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Author: apache, Project: hbase, Lines: 9, Source: AbstractDataBlockEncoder.java
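One pattern recurs across Examples 1–3, 7–8, 13, and 15: after encoding, the context is finalized with BlockType.ENCODED_DATA when a real data block encoding is configured, and with plain BlockType.DATA otherwise. As a minimal sketch (the helper class and method below are hypothetical, not part of HBase), that dispatch can be isolated like this:

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.io.hfile.BlockType;

// Hypothetical helper; captures the ENCODED_DATA vs. DATA dispatch above.
final class PostEncodingTypes {
  static BlockType pickPostEncodingType(HFileBlockEncodingContext ctx) {
    return ctx.getDataBlockEncoding() != DataBlockEncoding.NONE
        ? BlockType.ENCODED_DATA // the block body was rewritten by an encoder
        : BlockType.DATA;        // the body holds plain, unencoded cells
  }
}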


Note: The org.apache.hadoop.hbase.io.hfile.BlockType class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution or use should follow the License of the corresponding project. Do not reproduce without permission.