

Java KeyValue.getLength Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.getLength. If you are wondering what KeyValue.getLength does, how to call it, or what real uses of it look like, the code examples curated below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.KeyValue.


The sections below present 5 code examples of the KeyValue.getLength method, ordered by popularity.
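For orientation, KeyValue.getLength() returns the size in bytes of the KeyValue's full serialized form in its backing byte array: the key portion, the value portion, the two 4-byte length ints at the front, and any tags region. The snippet below is a minimal sketch of my own (the class name and values are illustrative and not taken from the examples that follow) that prints the related length accessors side by side:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyValueLengthDemo {
  public static void main(String[] args) {
    // A small KeyValue: row "r", family "f", qualifier "q", value "v"
    KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Total serialized length of the KeyValue in its backing array
    System.out.println("getLength()      = " + kv.getLength());
    // Length of the key portion (row + family + qualifier + timestamp + type)
    System.out.println("getKeyLength()   = " + kv.getKeyLength());
    // Length of the value portion only
    System.out.println("getValueLength() = " + kv.getValueLength());
  }
}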

Example 1: testOne

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  KeyValueCodec kvc = new KeyValueCodec();
  Codec.Encoder encoder = kvc.getEncoder(dos);
  final KeyValue kv =
    new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  final long length = kv.getLength() + Bytes.SIZEOF_INT; 
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(length, offset);
  CountingInputStream cis =
    new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = kvc.getDecoder(dis);
  assertTrue(decoder.advance()); // First read should pull in the KV
  // Second read should trip over the end-of-stream marker and return false
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(length, cis.getCount());
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 26, Source file: TestKeyValueCodec.java
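A note on the expected length in this test: the assertion implies that the encoder writes each KeyValue as a 4-byte length prefix followed by the KeyValue's serialized bytes, which is why the expected stream size is kv.getLength() + Bytes.SIZEOF_INT. A small sketch of that accounting, assuming that framing (an illustrative helper, not part of the test):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyValueCodecSizing {
  // Expected on-wire size of one encoded KeyValue, assuming (as the
  // assertion above implies) a 4-byte length prefix plus the backing bytes.
  static long expectedEncodedSize(KeyValue kv) {
    return Bytes.SIZEOF_INT + kv.getLength();
  }
}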

Example 2: writeKV

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Compresses and writes the given KeyValue to out, a DataOutput.
 *
 * @param out the DataOutput to write to
 * @param keyVal the KeyValue to compress and write
 * @param writeContext the CompressionContext to use
 * @throws IOException if writing to out fails
 */
public static void writeKV(final DataOutput out, KeyValue keyVal,
    CompressionContext writeContext) throws IOException {
  byte[] backingArray = keyVal.getBuffer();
  int offset = keyVal.getOffset();

  // first write the key, value, and tags lengths as VInts
  WritableUtils.writeVInt(out, keyVal.getKeyLength());
  WritableUtils.writeVInt(out, keyVal.getValueLength());
  WritableUtils.writeVInt(out, keyVal.getTagsLength());

  // now we write the row key, as the row key is likely to be repeated
  // We save space only if we attempt to compress elements with duplicates
  Compressor.writeCompressed(keyVal.getBuffer(), keyVal.getRowOffset(),
      keyVal.getRowLength(), out, writeContext.rowDict);


  // now family, if it exists. if it doesn't, we write a 0 length array.
  Compressor.writeCompressed(keyVal.getBuffer(), keyVal.getFamilyOffset(),
      keyVal.getFamilyLength(), out, writeContext.familyDict);

  // qualifier next
  Compressor.writeCompressed(keyVal.getBuffer(), keyVal.getQualifierOffset(),
      keyVal.getQualifierLength(), out,
      writeContext.qualifierDict);

  // now we write the rest uncompressed
  int pos = keyVal.getTimestampOffset();
  int remainingLength = keyVal.getLength() + offset - (pos);
  out.write(backingArray, pos, remainingLength);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 39, Source file: KeyValueCompression.java
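The last write in writeKV covers everything from the timestamp onward: timestamp, type byte, value, and any tags. Its length is computed as keyVal.getLength() + offset - pos because getTimestampOffset() is an absolute index into the backing array, while getLength() is relative to getOffset(). A standalone sketch of the same arithmetic (illustrative, assuming the standard KeyValue layout):

import org.apache.hadoop.hbase.KeyValue;

public class KeyValueTailLength {
  // Length of the region from the timestamp to the end of the KeyValue
  // (timestamp + type byte + value + tags), as written uncompressed above.
  static int tailLength(KeyValue kv) {
    int end = kv.getOffset() + kv.getLength(); // one past the last byte
    return end - kv.getTimestampOffset();
  }
}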

Example 3: write

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
@Override
public void write(Cell c) throws IOException {
  KeyValue kv = KeyValueUtil.ensureKeyValue(c);
  expectState(State.WRITING);
  this.dataBlockEncoder.encode(kv, dataBlockEncodingCtx, this.userDataStream);
  this.unencodedDataSizeWritten += kv.getLength();
  if (dataBlockEncodingCtx.getHFileContext().isIncludesMvcc()) {
    this.unencodedDataSizeWritten += WritableUtils.getVIntSize(kv.getMvccVersion());
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 11, Source file: TestHFileBlockCompatibility.java

Example 4: checkStatistics

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Checks statistics for the given HFile against the different data block encoders.
 * @param scanner scanner over the file that will be compressed
 * @param kvLimit maximum number of KeyValues to process
 * @throws IOException if the scanner is invalid
 */
public void checkStatistics(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  scanner.seek(KeyValue.LOWESTKEY);

  KeyValue currentKV;

  byte[] previousKey = null;
  byte[] currentKey;

  DataBlockEncoding[] encodings = DataBlockEncoding.values();

  ByteArrayOutputStream uncompressedOutputStream =
      new ByteArrayOutputStream();

  int j = 0;
  while ((currentKV = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) {
    // Iterates through key/value pairs
    j++;
    currentKey = currentKV.getKey();
    if (previousKey != null) {
      for (int i = 0; i < previousKey.length && i < currentKey.length &&
          previousKey[i] == currentKey[i]; ++i) {
        totalKeyRedundancyLength++;
      }
    }

    uncompressedOutputStream.write(currentKV.getBuffer(),
        currentKV.getOffset(), currentKV.getLength());

    previousKey = currentKey;

    int kLen = currentKV.getKeyLength();
    int vLen = currentKV.getValueLength();
    int cfLen = currentKV.getFamilyLength(currentKV.getFamilyOffset());
    int restLen = currentKV.getLength() - kLen - vLen;

    totalKeyLength += kLen;
    totalValueLength += vLen;
    totalPrefixLength += restLen;
    totalCFLength += cfLen;
  }

  rawKVs = uncompressedOutputStream.toByteArray();
  boolean useTag = (currentKV.getTagsLength() > 0);
  for (DataBlockEncoding encoding : encodings) {
    if (encoding == DataBlockEncoding.NONE) {
      continue;
    }
    DataBlockEncoder d = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder()
                        .withCompression(Compression.Algorithm.NONE)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(useTag).build();
    codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta ));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 63, Source file: DataBlockEncodingTool.java
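In checkStatistics, restLen = currentKV.getLength() - kLen - vLen captures every byte that is neither key nor value: the two 4-byte length ints at the front of the serialization, plus the tags region when tags are present; the tool credits this to totalPrefixLength. A compact sketch of that overhead calculation (my own illustration, assuming the standard KeyValue layout):

import org.apache.hadoop.hbase.KeyValue;

public class KeyValueOverhead {
  // Bytes of a serialized KeyValue that belong to neither the key nor the
  // value: the two leading 4-byte length ints, plus the tags length short
  // and the tag bytes when tags are present.
  static int overhead(KeyValue kv) {
    return kv.getLength() - kv.getKeyLength() - kv.getValueLength();
  }
}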

Example 5: writeTestKeyValues

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
static int writeTestKeyValues(HFileBlock.Writer hbw, int seed, boolean includesMemstoreTS,
    boolean useTag) throws IOException {
  List<KeyValue> keyValues = new ArrayList<KeyValue>();
  Random randomizer = new Random(42L + seed); // just any fixed seed

  // generate keyValues
  for (int i = 0; i < NUM_KEYVALUES; ++i) {
    byte[] row;
    long timestamp;
    byte[] family;
    byte[] qualifier;
    byte[] value;

    // generate it or repeat, it should compress well
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      row = keyValues.get(randomizer.nextInt(keyValues.size())).getRow();
    } else {
      row = new byte[FIELD_LENGTH];
      randomizer.nextBytes(row);
    }
    if (0 == i) {
      family = new byte[FIELD_LENGTH];
      randomizer.nextBytes(family);
    } else {
      family = keyValues.get(0).getFamily();
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      qualifier = keyValues.get(
          randomizer.nextInt(keyValues.size())).getQualifier();
    } else {
      qualifier = new byte[FIELD_LENGTH];
      randomizer.nextBytes(qualifier);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      value = keyValues.get(randomizer.nextInt(keyValues.size())).getValue();
    } else {
      value = new byte[FIELD_LENGTH];
      randomizer.nextBytes(value);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      timestamp = keyValues.get(
          randomizer.nextInt(keyValues.size())).getTimestamp();
    } else {
      timestamp = randomizer.nextLong();
    }
    if (!useTag) {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value));
    } else {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value, new Tag[] { new Tag(
          (byte) 1, Bytes.toBytes("myTagVal")) }));
    }
  }

  // sort it and write to stream
  int totalSize = 0;
  Collections.sort(keyValues, KeyValue.COMPARATOR);

  for (KeyValue kv : keyValues) {
    totalSize += kv.getLength();
    if (includesMemstoreTS) {
      long memstoreTS = randomizer.nextLong();
      kv.setSequenceId(memstoreTS);
      totalSize += WritableUtils.getVIntSize(memstoreTS);
    }
    hbw.write(kv);
  }
  return totalSize;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 69, Source file: TestHFileBlock.java
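The totalSize returned by writeTestKeyValues counts each KeyValue's full serialized length and, when memstore timestamps are included, the VInt-encoded size of the sequence id appended after each cell. A per-cell version of that accounting (illustrative only, mirroring the loop above):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.io.WritableUtils;

public class UnencodedSizeAccounting {
  // Per-cell unencoded size: the serialized KeyValue plus, when memstore
  // timestamps are included, the VInt-encoded sequence id written after it.
  static int unencodedSize(KeyValue kv, boolean includesMemstoreTS, long memstoreTS) {
    int size = kv.getLength();
    if (includesMemstoreTS) {
      size += WritableUtils.getVIntSize(memstoreTS);
    }
    return size;
  }
}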

