

Java DataBlockEncoding.getEncoder Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.getEncoder. If you are unsure what DataBlockEncoding.getEncoder does or how to call it, the hand-picked examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.


The following shows 6 code examples of the DataBlockEncoding.getEncoder method, sorted by popularity by default.
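
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name GetEncoderSketch is made up for illustration) of what getEncoder() returns: every encoding other than DataBlockEncoding.NONE yields its matching DataBlockEncoder instance, while NONE yields null, which is why some of the examples below guard against a null encoder.

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

public class GetEncoderSketch {
  public static void main(String[] args) {
    for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
      // getEncoder() returns null for DataBlockEncoding.NONE and a concrete
      // encoder (prefix, diff, fast-diff, ...) for every other value.
      DataBlockEncoder encoder = encoding.getEncoder();
      System.out.println(encoding + " -> "
          + (encoder == null ? "no encoder" : encoder.getClass().getSimpleName()));
    }
  }
}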

Example 1: encodeBufferToHFileBlockBuffer

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class this method depends on
private ByteBuffer encodeBufferToHFileBlockBuffer(ByteBuffer in,
    DataBlockEncoding algo, boolean includesMemstoreTS,
    byte[] dummyHeader) {
  ByteArrayOutputStream encodedStream = new ByteArrayOutputStream();
  DataOutputStream dataOut = new DataOutputStream(encodedStream);
  DataBlockEncoder encoder = algo.getEncoder();
  try {
    // Reserve space for the block header, write the encoding ID, then let the
    // encoder append the encoded key/values after it.
    encodedStream.write(dummyHeader);
    algo.writeIdInBytes(dataOut);
    encoder.compressKeyValues(dataOut, in,
        includesMemstoreTS);
  } catch (IOException e) {
    throw new RuntimeException(String.format("Bug in data block encoder " +
        "'%s', it probably requested too much data", algo.toString()), e);
  }
  return ByteBuffer.wrap(encodedStream.toByteArray());
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 18, Source file: HFileDataBlockEncoderImpl.java

Example 2: EncodedScannerV2

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class this method depends on
public EncodedScannerV2(HFileReaderV2 reader, boolean cacheBlocks,
    boolean pread, boolean isCompaction, HFileContext meta) {
  super(reader, cacheBlocks, pread, isCompaction);
  // Look up the encoder matching the file's configured data block encoding,
  // then build a decoding context and a seeker over its encoded blocks.
  DataBlockEncoding encoding = reader.dataBlockEncoder.getDataBlockEncoding();
  dataBlockEncoder = encoding.getEncoder();
  decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta);
  seeker = dataBlockEncoder.createSeeker(
    reader.getComparator(), decodingCtx);
  this.meta = meta;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 11, Source file: HFileReaderV2.java

Example 3: encodeBufferToHFileBlockBuffer

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class this method depends on
/**
 * Encode a block of key value pairs.
 *
 * @param in input data to encode
 * @param algo encoding algorithm
 * @param encodeCtx encoding context in which the output data will be stored
 */
private void encodeBufferToHFileBlockBuffer(ByteBuffer in, DataBlockEncoding algo,
    HFileBlockEncodingContext encodeCtx) {
  DataBlockEncoder encoder = algo.getEncoder();
  try {
    encoder.encodeKeyValues(in, encodeCtx);
  } catch (IOException e) {
    throw new RuntimeException(String.format(
        "Bug in data block encoder "
            + "'%s', it probably requested too much data, " +
            "exception message: %s.",
            algo.toString(), e.getMessage()), e);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines of code: 21, Source file: HFileDataBlockEncoderImpl.java

Example 4: checkStatistics

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class this method depends on
/**
 * Check statistics for the given HFile across the different data block encoders.
 * @param scanner scanner of the file which will be compressed
 * @param kvLimit maximal number of KeyValues that will be processed
 * @throws IOException thrown if the scanner is invalid
 */
public void checkStatistics(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  scanner.seek(KeyValue.LOWESTKEY);

  KeyValue currentKV;

  byte[] previousKey = null;
  byte[] currentKey;

  DataBlockEncoding[] encodings = DataBlockEncoding.values();

  ByteArrayOutputStream uncompressedOutputStream =
      new ByteArrayOutputStream();

  int j = 0;
  while ((currentKV = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) {
    // Iterates through key/value pairs
    j++;
    currentKey = currentKV.getKey();
    if (previousKey != null) {
      for (int i = 0; i < previousKey.length && i < currentKey.length &&
          previousKey[i] == currentKey[i]; ++i) {
        totalKeyRedundancyLength++;
      }
    }

    uncompressedOutputStream.write(currentKV.getBuffer(),
        currentKV.getOffset(), currentKV.getLength());

    previousKey = currentKey;

    int kLen = currentKV.getKeyLength();
    int vLen = currentKV.getValueLength();
    int cfLen = currentKV.getFamilyLength(currentKV.getFamilyOffset());
    int restLen = currentKV.getLength() - kLen - vLen;

    totalKeyLength += kLen;
    totalValueLength += vLen;
    totalPrefixLength += restLen;
    totalCFLength += cfLen;
  }

  rawKVs = uncompressedOutputStream.toByteArray();
  boolean useTag = (currentKV.getTagsLength() > 0);
  for (DataBlockEncoding encoding : encodings) {
    if (encoding == DataBlockEncoding.NONE) {
      continue;
    }
    DataBlockEncoder d = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder()
                        .withCompression(Compression.Algorithm.NONE)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(useTag).build();
    codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta ));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 63, Source file: DataBlockEncodingTool.java

Example 5: checkStatistics

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class this method depends on
/**
 * Check statistics for the given HFile across the different data block encoders.
 * @param scanner scanner of the file which will be compressed
 * @param kvLimit maximal number of KeyValues that will be processed
 * @throws IOException thrown if the scanner is invalid
 */
public void checkStatistics(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  scanner.seek(KeyValue.LOWESTKEY);

  KeyValue currentKV;

  byte[] previousKey = null;
  byte[] currentKey;

  DataBlockEncoding[] encodings = DataBlockEncoding.values();

  ByteArrayOutputStream uncompressedOutputStream =
      new ByteArrayOutputStream();

  int j = 0;
  while ((currentKV = scanner.next()) != null && j < kvLimit) {
    // Iterates through key/value pairs
    j++;
    currentKey = currentKV.getKey();
    if (previousKey != null) {
      for (int i = 0; i < previousKey.length && i < currentKey.length &&
          previousKey[i] == currentKey[i]; ++i) {
        totalKeyRedundancyLength++;
      }
    }

    uncompressedOutputStream.write(currentKV.getBuffer(),
        currentKV.getOffset(), currentKV.getLength());

    previousKey = currentKey;

    int kLen = currentKV.getKeyLength();
    int vLen = currentKV.getValueLength();
    int cfLen = currentKV.getFamilyLength(currentKV.getFamilyOffset());
    int restLen = currentKV.getLength() - kLen - vLen;

    totalKeyLength += kLen;
    totalValueLength += vLen;
    totalPrefixLength += restLen;
    totalCFLength += cfLen;
  }

  rawKVs = uncompressedOutputStream.toByteArray();
  boolean useTag = (currentKV.getTagsLength() > 0);
  for (DataBlockEncoding encoding : encodings) {
    if (encoding == DataBlockEncoding.NONE) {
      continue;
    }
    DataBlockEncoder d = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder()
                        .withCompression(Compression.Algorithm.NONE)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(useTag).build();
    codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta ));
  }
}
 
Developer: tenggyut, Project: HIndex, Lines of code: 63, Source file: DataBlockEncodingTool.java

Example 6: writeEncodedBlock

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class this method depends on
static void writeEncodedBlock(Algorithm algo, DataBlockEncoding encoding,
    DataOutputStream dos, final List<Integer> encodedSizes,
    final List<ByteBuffer> encodedBlocks, int blockId,
    boolean includesMemstoreTS, byte[] dummyHeader, boolean useTag) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DoubleOutputStream doubleOutputStream =
      new DoubleOutputStream(dos, baos);
  writeTestKeyValues(doubleOutputStream, blockId, includesMemstoreTS, useTag);
  ByteBuffer rawBuf = ByteBuffer.wrap(baos.toByteArray());
  rawBuf.rewind();

  DataBlockEncoder encoder = encoding.getEncoder();
  int headerLen = dummyHeader.length;
  byte[] encodedResultWithHeader = null;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTag)
                      .build();
  if (encoder != null) {
    HFileBlockEncodingContext encodingCtx = encoder.newDataBlockEncodingContext(encoding,
        dummyHeader, meta);
    encoder.encodeKeyValues(rawBuf, encodingCtx);
    encodedResultWithHeader =
        encodingCtx.getUncompressedBytesWithHeader();
  } else {
    HFileBlockDefaultEncodingContext defaultEncodingCtx = new HFileBlockDefaultEncodingContext(
        encoding, dummyHeader, meta);
    byte[] rawBufWithHeader =
        new byte[rawBuf.array().length + headerLen];
    System.arraycopy(rawBuf.array(), 0, rawBufWithHeader,
        headerLen, rawBuf.array().length);
    defaultEncodingCtx.compressAfterEncodingWithBlockType(rawBufWithHeader,
        BlockType.DATA);
    encodedResultWithHeader =
      defaultEncodingCtx.getUncompressedBytesWithHeader();
  }
  final int encodedSize =
      encodedResultWithHeader.length - headerLen;
  if (encoder != null) {
    // We need to account for the two-byte encoding algorithm ID that
    // comes after the 24-byte block header but before encoded KVs.
    headerLen += DataBlockEncoding.ID_SIZE;
  }
  byte[] encodedDataSection =
      new byte[encodedResultWithHeader.length - headerLen];
  System.arraycopy(encodedResultWithHeader, headerLen,
      encodedDataSection, 0, encodedDataSection.length);
  final ByteBuffer encodedBuf =
      ByteBuffer.wrap(encodedDataSection);
  encodedSizes.add(encodedSize);
  encodedBlocks.add(encodedBuf);
}
 
Developer: tenggyut, Project: HIndex, Lines of code: 54, Source file: TestHFileBlock.java


Note: The org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.getEncoder examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please follow the license of the corresponding project. Do not republish without permission.