

Java KeyValue.getLength Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.getLength. If you have been wondering what KeyValue.getLength does, or how and where to use it, the curated code examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.KeyValue.


The following presents five code examples of the KeyValue.getLength method, sorted by popularity by default.
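As a quick orientation before the examples (a minimal sketch of our own, not taken from the projects below): getLength() returns the total number of bytes the cell occupies in its backing byte array, i.e. the 4-byte key-length and 4-byte value-length prefixes plus the key and value bytes themselves (and a tags section, when the cell carries tags).

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class GetLengthDemo {
  public static void main(String[] args) {
    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("value"));
    // Total serialized size of the cell in its backing array.
    System.out.println("serialized size = " + kv.getLength());
  }
}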

Example 1: testOne

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  KeyValueCodec kvc = new KeyValueCodec();
  Codec.Encoder encoder = kvc.getEncoder(dos);
  final KeyValue kv =
    new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  final long length = kv.getLength() + Bytes.SIZEOF_INT; 
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(length, offset);
  CountingInputStream cis =
    new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = kvc.getDecoder(dis);
  assertTrue(decoder.advance()); // First read should pull in the KV
  // Second read should trip over the end-of-stream marker and return false
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(length, cis.getCount());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: TestKeyValueCodec.java
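A note on the expected length in this test: KeyValueCodec frames each cell on the wire as a 4-byte total length followed by the raw KeyValue bytes, which is why the test adds Bytes.SIZEOF_INT. A one-line sketch of that arithmetic (our illustration, assuming this framing; framedSize is a hypothetical helper):

// On-wire size of one cell under the assumed [int length][KV bytes] framing.
static long framedSize(KeyValue kv) {
  return Bytes.SIZEOF_INT + kv.getLength();
}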

Example 2: writeKV

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
/**
 * Compresses and writes the given KV to out, a DataOutput.
 *
 * @param out the DataOutput
 * @param keyVal the KV to compress and write
 * @param writeContext the CompressionContext to use
 * @throws IOException
 */
public static void writeKV(final DataOutput out, KeyValue keyVal,
    CompressionContext writeContext) throws IOException {
  byte[] backingArray = keyVal.getBuffer();
  int offset = keyVal.getOffset();

  // we first write the KeyValue infrastructure as VInts.
  WritableUtils.writeVInt(out, keyVal.getKeyLength());
  WritableUtils.writeVInt(out, keyVal.getValueLength());
  WritableUtils.writeVInt(out, keyVal.getTagsLength());

  // now we write the row key, as the row key is likely to be repeated
  // We save space only if we attempt to compress elements with duplicates
  Compressor.writeCompressed(keyVal.getBuffer(), keyVal.getRowOffset(),
      keyVal.getRowLength(), out, writeContext.rowDict);

  // now family, if it exists. if it doesn't, we write a 0 length array.
  Compressor.writeCompressed(keyVal.getBuffer(), keyVal.getFamilyOffset(),
      keyVal.getFamilyLength(), out, writeContext.familyDict);

  // qualifier next
  Compressor.writeCompressed(keyVal.getBuffer(), keyVal.getQualifierOffset(),
      keyVal.getQualifierLength(), out,
      writeContext.qualifierDict);

  // now we write the rest uncompressed
  int pos = keyVal.getTimestampOffset();
  int remainingLength = keyVal.getLength() + offset - pos;
  out.write(backingArray, pos, remainingLength);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 39, Source: KeyValueCompression.java
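To make the offset arithmetic at the end of writeKV concrete: a serialized KeyValue lays out as [keyLen(4)][valueLen(4)][row, family, qualifier, timestamp, type][value][optional tags], so everything from the timestamp offset to the end of the cell (timestamp, type byte, value, and any tags) is written uncompressed. The following hypothetical helper (our sketch, not part of KeyValueCompression) computes that tail length:

import org.apache.hadoop.hbase.KeyValue;

// Length of the uncompressed tail that writeKV emits after the
// dictionary-compressed row, family, and qualifier.
static int uncompressedTailLength(KeyValue kv) {
  // end of the cell minus the position where the timestamp starts
  return kv.getOffset() + kv.getLength() - kv.getTimestampOffset();
}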

Example 3: write

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
@Override
public void write(Cell c) throws IOException {
  KeyValue kv = KeyValueUtil.ensureKeyValue(c);
  expectState(State.WRITING);
  this.dataBlockEncoder.encode(kv, dataBlockEncodingCtx, this.userDataStream);
  this.unencodedDataSizeWritten += kv.getLength();
  if (dataBlockEncodingCtx.getHFileContext().isIncludesMvcc()) {
    this.unencodedDataSizeWritten += WritableUtils.getVIntSize(kv.getMvccVersion());
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 11, Source: TestHFileBlockCompatibility.java
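The bookkeeping in write() tracks how many unencoded bytes each cell contributes: the raw KeyValue bytes plus, when the HFile context includes MVCC, the variable-length-encoded MVCC version. A standalone version of that accounting (a sketch; unencodedSize is a hypothetical helper, not part of the writer):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.io.WritableUtils;

// Unencoded contribution of one cell: raw KV bytes, plus the vint-encoded
// MVCC version when MVCC timestamps are included in the block.
static long unencodedSize(KeyValue kv, boolean includesMvcc) {
  long size = kv.getLength();
  if (includesMvcc) {
    size += WritableUtils.getVIntSize(kv.getMvccVersion());
  }
  return size;
}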

Example 4: checkStatistics

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
/**
 * Check statistics for the given HFile for different data block encoders.
 * @param scanner scanner over the file that will be compressed
 * @param kvLimit maximum number of KeyValues to process
 * @throws IOException thrown if scanner is invalid
 */
public void checkStatistics(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  scanner.seek(KeyValue.LOWESTKEY);

  KeyValue currentKV;

  byte[] previousKey = null;
  byte[] currentKey;

  DataBlockEncoding[] encodings = DataBlockEncoding.values();

  ByteArrayOutputStream uncompressedOutputStream =
      new ByteArrayOutputStream();

  int j = 0;
  while ((currentKV = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) {
    // Iterates through key/value pairs
    j++;
    currentKey = currentKV.getKey();
    if (previousKey != null) {
      for (int i = 0; i < previousKey.length && i < currentKey.length &&
          previousKey[i] == currentKey[i]; ++i) {
        totalKeyRedundancyLength++;
      }
    }

    uncompressedOutputStream.write(currentKV.getBuffer(),
        currentKV.getOffset(), currentKV.getLength());

    previousKey = currentKey;

    int kLen = currentKV.getKeyLength();
    int vLen = currentKV.getValueLength();
    int cfLen = currentKV.getFamilyLength(currentKV.getFamilyOffset());
    int restLen = currentKV.getLength() - kLen - vLen;

    totalKeyLength += kLen;
    totalValueLength += vLen;
    totalPrefixLength += restLen;
    totalCFLength += cfLen;
  }

  rawKVs = uncompressedOutputStream.toByteArray();
  // Guard against an empty scan: currentKV is null when the scanner is
  // exhausted before any KeyValue is read.
  boolean useTag = currentKV != null && currentKV.getTagsLength() > 0;
  for (DataBlockEncoding encoding : encodings) {
    if (encoding == DataBlockEncoding.NONE) {
      continue;
    }
    DataBlockEncoder d = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder()
                        .withCompression(Compression.Algorithm.NONE)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(useTag).build();
    codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 63, Source: DataBlockEncodingTool.java
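Two of the counters above lean directly on getLength(). totalKeyRedundancyLength counts leading bytes shared between consecutive keys, while restLen isolates per-cell overhead: since a serialized KeyValue is [keyLen(4)][valueLen(4)][key][value][optional tags], subtracting the key and value lengths from getLength() leaves the two length prefixes plus any tags section. A sketch of that overhead calculation (infrastructureLength is a hypothetical helper, not part of DataBlockEncodingTool):

import org.apache.hadoop.hbase.KeyValue;

// Per-cell overhead beyond the key and value bytes: the two 4-byte length
// prefixes, plus the tags section when the cell carries tags.
static int infrastructureLength(KeyValue kv) {
  return kv.getLength() - kv.getKeyLength() - kv.getValueLength();
}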

Example 5: writeTestKeyValues

import org.apache.hadoop.hbase.KeyValue; // import the class this method depends on
static int writeTestKeyValues(HFileBlock.Writer hbw, int seed, boolean includesMemstoreTS,
    boolean useTag) throws IOException {
  List<KeyValue> keyValues = new ArrayList<KeyValue>();
  Random randomizer = new Random(42L + seed); // just any fixed number

  // generate keyValues
  for (int i = 0; i < NUM_KEYVALUES; ++i) {
    byte[] row;
    long timestamp;
    byte[] family;
    byte[] qualifier;
    byte[] value;

    // generate it or repeat, it should compress well
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      row = keyValues.get(randomizer.nextInt(keyValues.size())).getRow();
    } else {
      row = new byte[FIELD_LENGTH];
      randomizer.nextBytes(row);
    }
    if (0 == i) {
      family = new byte[FIELD_LENGTH];
      randomizer.nextBytes(family);
    } else {
      family = keyValues.get(0).getFamily();
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      qualifier = keyValues.get(
          randomizer.nextInt(keyValues.size())).getQualifier();
    } else {
      qualifier = new byte[FIELD_LENGTH];
      randomizer.nextBytes(qualifier);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      value = keyValues.get(randomizer.nextInt(keyValues.size())).getValue();
    } else {
      value = new byte[FIELD_LENGTH];
      randomizer.nextBytes(value);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      timestamp = keyValues.get(
          randomizer.nextInt(keyValues.size())).getTimestamp();
    } else {
      timestamp = randomizer.nextLong();
    }
    if (!useTag) {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value));
    } else {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value, new Tag[] { new Tag(
          (byte) 1, Bytes.toBytes("myTagVal")) }));
    }
  }

  // sort it and write to stream
  int totalSize = 0;
  Collections.sort(keyValues, KeyValue.COMPARATOR);

  for (KeyValue kv : keyValues) {
    totalSize += kv.getLength();
    if (includesMemstoreTS) {
      long memstoreTS = randomizer.nextLong();
      kv.setSequenceId(memstoreTS);
      totalSize += WritableUtils.getVIntSize(memstoreTS);
    }
    hbw.write(kv);
  }
  return totalSize;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 69, Source: TestHFileBlock.java


Note: The org.apache.hadoop.hbase.KeyValue.getLength method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright of the source code remains with the original authors. For distribution and use, please consult the corresponding project's License; do not repost without permission.