

Java DataBlockEncoding.toString Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.toString. If you are unsure what DataBlockEncoding.toString does or how to call it, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.


Five code examples of the DataBlockEncoding.toString method are shown below, ordered by popularity.
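
Before the collected examples, here is a minimal standalone sketch of what toString() returns. It assumes the enum's default toString() behavior (the constant name) and the getId() accessor; the demo class itself is hypothetical:

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

public class DataBlockEncodingToStringDemo {
  public static void main(String[] args) {
    // toString() on this enum yields the constant name, e.g. "FAST_DIFF".
    for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
      System.out.println(encoding.toString() + " (id=" + encoding.getId() + ")");
    }
    // The name round-trips through the standard enum valueOf().
    DataBlockEncoding parsed =
        DataBlockEncoding.valueOf(DataBlockEncoding.FAST_DIFF.toString());
    assert parsed == DataBlockEncoding.FAST_DIFF;
  }
}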

Example 1: setDataBlockEncoding

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the class the method depends on
/**
 * Set data block encoding algorithm used in block cache.
 * @param type What kind of data block encoding will be used.
 * @return this (for chained invocation)
 */
public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
  String name;
  if (type != null) {
    name = type.toString();
  } else {
    name = DataBlockEncoding.NONE.toString();
  }
  return setValue(DATA_BLOCK_ENCODING, name);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: HColumnDescriptor.java
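
Example 1 uses toString() to persist the encoding name under the descriptor's DATA_BLOCK_ENCODING key. A minimal, hypothetical usage sketch of this setter (assuming the classic HColumnDescriptor/HTableDescriptor API; the table and family names are made up):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

public class SetEncodingDemo {
  public static void main(String[] args) {
    // Configure a column family to use FAST_DIFF data block encoding.
    HColumnDescriptor family = new HColumnDescriptor("cf");
    family.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);

    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo_table"));
    table.addFamily(family);

    // Passing null falls back to NONE, as the setter's null check shows.
    family.setDataBlockEncoding(null);
    System.out.println(family.getDataBlockEncoding()); // prints NONE
  }
}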

Example 2: testDataBlockEncoding

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the class the method depends on
/**
 * Test encoding/decoding data blocks.
 * @throws IOException a bug or a problem with temporary files.
 */
@Test
public void testDataBlockEncoding() throws IOException {
  final int numBlocks = 5;
  for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
    for (boolean pread : new boolean[] { false, true }) {
      for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
        Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
            + algo + "_" + encoding.toString());
        FSDataOutputStream os = fs.create(path);
        HFileDataBlockEncoder dataBlockEncoder =
            new HFileDataBlockEncoderImpl(encoding);
        HFileBlock.Writer hbw = new HFileBlock.Writer(algo, dataBlockEncoder,
            includesMemstoreTS,
            HFileReaderV2.MAX_MINOR_VERSION,
            HFile.DEFAULT_CHECKSUM_TYPE,
            HFile.DEFAULT_BYTES_PER_CHECKSUM);
        long totalSize = 0;
        final List<Integer> encodedSizes = new ArrayList<Integer>();
        final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
        for (int blockId = 0; blockId < numBlocks; ++blockId) {
          DataOutputStream dos = hbw.startWriting(BlockType.DATA);
          writeEncodedBlock(encoding, dos, encodedSizes, encodedBlocks,
              blockId, includesMemstoreTS);

          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, algo,
            totalSize);
        hbr.setDataBlockEncoder(dataBlockEncoder);
        hbr.setIncludesMemstoreTS(includesMemstoreTS);

        HFileBlock b;
        int pos = 0;
        for (int blockId = 0; blockId < numBlocks; ++blockId) {
          b = hbr.readBlockData(pos, -1, -1, pread);
          assertEquals(0, HFile.getChecksumFailuresCount());
          b.sanityCheck();
          pos += b.getOnDiskSizeWithHeader();

          assertEquals((int) encodedSizes.get(blockId),
              b.getUncompressedSizeWithoutHeader());
          ByteBuffer actualBuffer = b.getBufferWithoutHeader();
          if (encoding != DataBlockEncoding.NONE) {
            // We expect a two-byte big-endian encoding id.
            assertEquals(0, actualBuffer.get(0));
            assertEquals(encoding.getId(), actualBuffer.get(1));
            actualBuffer.position(2);
            actualBuffer = actualBuffer.slice();
          }

          ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
          expectedBuffer.rewind();

          // test if content matches, produce nice message
          assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding,
              pread);
        }
        is.close();
      }
    }
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 71, Source: TestHFileBlock.java
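
The assertions on actualBuffer in this test rely on every encoded block payload starting with a two-byte big-endian encoding id. A minimal sketch of that framing in isolation (the payload bytes here are fabricated for illustration):

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

public class EncodingIdHeaderDemo {
  public static void main(String[] args) {
    DataBlockEncoding encoding = DataBlockEncoding.PREFIX;

    // Simulate an encoded payload: two-byte big-endian id, then the data.
    ByteBuffer buf = ByteBuffer.allocate(4); // ByteBuffer defaults to big-endian
    buf.putShort(encoding.getId());
    buf.putShort((short) 0x1234);            // fake encoded content
    buf.rewind();

    // Mirrors the test: byte 0 is the high byte (0 for small ids),
    // byte 1 is the low byte, which equals the encoding id.
    assert buf.get(0) == 0;
    assert buf.get(1) == encoding.getId();

    // Skip the id header before comparing the actual encoded content.
    buf.position(2);
    ByteBuffer payload = buf.slice();
    System.out.println("payload bytes: " + payload.remaining()); // prints 2
  }
}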

Example 3: testDataBlockEncoding

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the class the method depends on
/**
 * Test encoding/decoding data blocks.
 * @throws IOException a bug or a problem with temporary files.
 */
@Test
public void testDataBlockEncoding() throws IOException {
  final int numBlocks = 5;
  for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
    for (boolean pread : new boolean[] { false, true }) {
      for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
        LOG.info("testDataBlockEncoding algo " + algo + 
                 " pread = " + pread +
                 " encoding " + encoding);
        Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
            + algo + "_" + encoding.toString());
        FSDataOutputStream os = fs.create(path);
        HFileDataBlockEncoder dataBlockEncoder =
            new HFileDataBlockEncoderImpl(encoding);
        Writer hbw = new Writer(algo, dataBlockEncoder,
            includesMemstoreTS);
        long totalSize = 0;
        final List<Integer> encodedSizes = new ArrayList<Integer>();
        final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
        for (int blockId = 0; blockId < numBlocks; ++blockId) {
          DataOutputStream dos = hbw.startWriting(BlockType.DATA);
          TestHFileBlock.writeEncodedBlock(encoding, dos, encodedSizes, encodedBlocks,
              blockId, includesMemstoreTS);

          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, is, algo,
            totalSize, MINOR_VERSION, fs, path);
        hbr.setDataBlockEncoder(dataBlockEncoder);
        hbr.setIncludesMemstoreTS(includesMemstoreTS);

        HFileBlock b;
        int pos = 0;
        for (int blockId = 0; blockId < numBlocks; ++blockId) {
          b = hbr.readBlockData(pos, -1, -1, pread);
          b.sanityCheck();
          pos += b.getOnDiskSizeWithHeader();

          assertEquals((int) encodedSizes.get(blockId),
              b.getUncompressedSizeWithoutHeader());
          ByteBuffer actualBuffer = b.getBufferWithoutHeader();
          if (encoding != DataBlockEncoding.NONE) {
            // We expect a two-byte big-endian encoding id.
            assertEquals(0, actualBuffer.get(0));
            assertEquals(encoding.getId(), actualBuffer.get(1));
            actualBuffer.position(2);
            actualBuffer = actualBuffer.slice();
          }

          ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
          expectedBuffer.rewind();

          // test if content matches, produce nice message
          TestHFileBlock.assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding,
              pread);
        }
        is.close();
      }
    }
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 70, Source: TestHFileBlockCompatibility.java

Example 4: testInternals

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the class the method depends on
private void testInternals() throws IOException {
  final int numBlocks = 5;
  if(includesTag) {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  }
  for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
    for (boolean pread : new boolean[] { false, true }) {
      for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
        Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
            + algo + "_" + encoding.toString());
        FSDataOutputStream os = fs.create(path);
        HFileDataBlockEncoder dataBlockEncoder =
            new HFileDataBlockEncoderImpl(encoding);
        HFileContext meta = new HFileContextBuilder()
                            .withCompression(algo)
                            .withIncludesMvcc(includesMemstoreTS)
                            .withIncludesTags(includesTag)
                            .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                            .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
                            .build();
        HFileBlock.Writer hbw = new HFileBlock.Writer(dataBlockEncoder,
           meta);
        long totalSize = 0;
        final List<Integer> encodedSizes = new ArrayList<Integer>();
        final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
        for (int blockId = 0; blockId < numBlocks; ++blockId) {
          DataOutputStream dos = hbw.startWriting(BlockType.DATA);
          writeEncodedBlock(algo, encoding, dos, encodedSizes, encodedBlocks,
              blockId, includesMemstoreTS, HConstants.HFILEBLOCK_DUMMY_HEADER, includesTag);
          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        meta = new HFileContextBuilder()
              .withHBaseCheckSum(true)
              .withCompression(algo)
              .withIncludesMvcc(includesMemstoreTS)
              .withIncludesTags(includesTag)
              .build();
        HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
        hbr.setDataBlockEncoder(dataBlockEncoder);
        hbr.setIncludesMemstoreTS(includesMemstoreTS);
        HFileBlock b;
        int pos = 0;
        for (int blockId = 0; blockId < numBlocks; ++blockId) {
          b = hbr.readBlockData(pos, -1, -1, pread);
          assertEquals(0, HFile.getChecksumFailuresCount());
          b.sanityCheck();
          pos += b.getOnDiskSizeWithHeader();
          assertEquals((int) encodedSizes.get(blockId),
              b.getUncompressedSizeWithoutHeader());
          ByteBuffer actualBuffer = b.getBufferWithoutHeader();
          if (encoding != DataBlockEncoding.NONE) {
            // We expect a two-byte big-endian encoding id.
            assertEquals(0, actualBuffer.get(0));
            assertEquals(encoding.getId(), actualBuffer.get(1));
            actualBuffer.position(2);
            actualBuffer = actualBuffer.slice();
          }

          ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
          expectedBuffer.rewind();

          // test if content matches, produce nice message
          assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding,
              pread);
        }
        is.close();
      }
    }
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 75, Source: TestHFileBlock.java
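
Unlike Examples 2 and 3, this test configures the writer and reader through an HFileContext rather than positional constructor arguments. A minimal sketch of the builder usage, assuming the HFileContextBuilder API shown above (the compression choice and flag values are arbitrary):

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class HFileContextDemo {
  public static void main(String[] args) {
    // Write-side context: declares compression, MVCC timestamps, and tags.
    HFileContext writeCtx = new HFileContextBuilder()
        .withCompression(Compression.Algorithm.GZ)
        .withIncludesMvcc(true)
        .withIncludesTags(false)
        .build();

    // Read-side context additionally declares HBase-level checksums.
    HFileContext readCtx = new HFileContextBuilder()
        .withHBaseCheckSum(true)
        .withCompression(Compression.Algorithm.GZ)
        .withIncludesMvcc(true)
        .withIncludesTags(false)
        .build();

    System.out.println(writeCtx);
    System.out.println(readCtx);
  }
}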

Example 5: testDataBlockEncoding

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the class the method depends on
/**
 * Test encoding/decoding data blocks.
 * @throws IOException a bug or a problem with temporary files.
 */
@Test
public void testDataBlockEncoding() throws IOException {
  if(includesTag) {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  }
  final int numBlocks = 5;
  for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
    for (boolean pread : new boolean[] { false, true }) {
      for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
        LOG.info("testDataBlockEncoding algo " + algo +
                 " pread = " + pread +
                 " encoding " + encoding);
        Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
            + algo + "_" + encoding.toString());
        FSDataOutputStream os = fs.create(path);
        HFileDataBlockEncoder dataBlockEncoder =
            new HFileDataBlockEncoderImpl(encoding);
        TestHFileBlockCompatibility.Writer hbw =
            new TestHFileBlockCompatibility.Writer(algo,
                dataBlockEncoder, includesMemstoreTS, includesTag);
        long totalSize = 0;
        final List<Integer> encodedSizes = new ArrayList<Integer>();
        final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
        for (int blockId = 0; blockId < numBlocks; ++blockId) {
          DataOutputStream dos = hbw.startWriting(BlockType.DATA);
          TestHFileBlock.writeEncodedBlock(algo, encoding, dos, encodedSizes,
              encodedBlocks, blockId, includesMemstoreTS,
              TestHFileBlockCompatibility.Writer.DUMMY_HEADER, includesTag);

          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileContext meta = new HFileContextBuilder()
                            .withHBaseCheckSum(false)
                            .withIncludesMvcc(includesMemstoreTS)
                            .withIncludesTags(includesTag)
                            .withCompression(algo)
                            .build();
        HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
            totalSize, fs, path, meta);
        hbr.setDataBlockEncoder(dataBlockEncoder);
        hbr.setIncludesMemstoreTS(includesMemstoreTS);

        HFileBlock b;
        int pos = 0;
        for (int blockId = 0; blockId < numBlocks; ++blockId) {
          b = hbr.readBlockData(pos, -1, -1, pread);
          b.sanityCheck();
          pos += b.getOnDiskSizeWithHeader();

          assertEquals((int) encodedSizes.get(blockId),
              b.getUncompressedSizeWithoutHeader());
          ByteBuffer actualBuffer = b.getBufferWithoutHeader();
          if (encoding != DataBlockEncoding.NONE) {
            // We expect a two-byte big-endian encoding id.
            assertEquals(0, actualBuffer.get(0));
            assertEquals(encoding.getId(), actualBuffer.get(1));
            actualBuffer.position(2);
            actualBuffer = actualBuffer.slice();
          }

          ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
          expectedBuffer.rewind();

          // test if content matches, produce nice message
          TestHFileBlock.assertBuffersEqual(expectedBuffer, actualBuffer,
            algo, encoding, pread);
        }
        is.close();
      }
    }
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 81, Source: TestHFileBlockCompatibility.java
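
Examples 4 and 5 both raise the HFile format version to 3 before including tags, because cell tags are only supported from HFile version 3 onward. A minimal configuration sketch (the property key is taken verbatim from the tests above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HFileV3ConfigDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Cell tags require the v3 HFile format.
    conf.setInt("hfile.format.version", 3);
    System.out.println(conf.getInt("hfile.format.version", -1)); // prints 3
  }
}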


Note: The org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.toString method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. For redistribution and use, please refer to the License of the corresponding project; do not reproduce without permission.