

Java KeyValueUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.KeyValueUtil. If you are wondering what KeyValueUtil is for, how to use it, or what real-world code that uses it looks like, the curated examples below should help.


KeyValueUtil belongs to the org.apache.hadoop.hbase package. The sections below present 15 code examples of the class, sorted by popularity.
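Before the individual examples, here is a minimal, self-contained sketch of the KeyValueUtil calls that recur below (createFirstOnRow, createLastOnRow, keyLength, ensureKeyValue). It assumes an HBase 1.x-era classpath, matching the ditb project the examples are drawn from; the class name KeyValueUtilQuickTour and the sample row/column values are illustrative, not part of any example.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyValueUtilQuickTour {
  public static void main(String[] args) {
    // A concrete cell: row "r1", family "f", qualifier "q", value "v".
    Cell cell = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));

    // "Fake" boundary keys: the smallest and largest possible keys on a row.
    // Several examples below use these to seed scanner seeks.
    KeyValue first = KeyValueUtil.createFirstOnRow(Bytes.toBytes("r1"));
    KeyValue last = KeyValueUtil.createLastOnRow(Bytes.toBytes("r1"));

    // first <= cell <= last in KeyValue sort order.
    System.out.println(KeyValue.COMPARATOR.compare(first, cell) <= 0); // true
    System.out.println(KeyValue.COMPARATOR.compare(cell, last) <= 0);  // true

    // Length of the flat key: <row><family><qualifier><timestamp><type>.
    System.out.println(KeyValueUtil.keyLength(cell));

    // Materialize an arbitrary Cell implementation as a concrete KeyValue,
    // as the WAL and MapReduce examples below do.
    KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
    System.out.println(kv.getKeyLength() == KeyValueUtil.keyLength(cell)); // true
  }
}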

Example 1: testSeekToBlockWithDecreasingCommonPrefix

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
/**
 * Test seeking while the file is block-encoded: seeking to the last possible
 * key on kv3's row should position the scanner on kv3.
 */
@Test
public void testSeekToBlockWithDecreasingCommonPrefix() throws IOException {
  List<KeyValue> sampleKv = new ArrayList<KeyValue>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q1"), Bytes.toBytes("val"));
  sampleKv.add(kv1);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q2"), Bytes.toBytes("val"));
  sampleKv.add(kv2);
  KeyValue kv3 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q3"), Bytes.toBytes("val"));
  sampleKv.add(kv3);
  KeyValue kv4 = new KeyValue(Bytes.toBytes("row11baa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q1"), Bytes.toBytes("val"));
  sampleKv.add(kv4);
  KeyValue toSeek = KeyValueUtil.createLastOnRow(kv3.getRowArray(), kv3.getRowOffset(),
      kv3.getRowLength(), null, 0, 0, null, 0, 0);
  seekToTheKey(kv3, sampleKv, toSeek);
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestSeekToBlockWithEncoders.java

Example 2: map

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
@Override
public void map(WALKey key, WALEdit value,
  Context context)
throws IOException {
  try {
    // skip all other tables
    if (Bytes.equals(table, key.getTablename().getName())) {
      for (Cell cell : value.getCells()) {
        KeyValue kv = KeyValueUtil.ensureKeyValueTypeForMR(cell);
        if (WALEdit.isMetaEditFamily(kv.getFamily())) continue;
        context.write(new ImmutableBytesWritable(kv.getRow()), kv);
      }
    }
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: WALPlayer.java

Example 3: map

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
/**
 * @param row  The current table row key.
 * @param value  The columns.
 * @param context  The current context.
 * @throws IOException When something is broken with the data.
 */
@Override
public void map(ImmutableBytesWritable row, Result value,
  Context context)
throws IOException {
  try {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Considering the row."
          + Bytes.toString(row.get(), row.getOffset(), row.getLength()));
    }
    if (filter == null || !filter.filterRowKey(row.get(), row.getOffset(), row.getLength())) {
      for (Cell kv : value.rawCells()) {
        kv = filterKv(filter, kv);
        // skip if we filtered it out
        if (kv == null) continue;
        // TODO get rid of ensureKeyValue
        context.write(row, KeyValueUtil.ensureKeyValueTypeForMR(convertKv(kv, cfRenameMap)));
      }
    }
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: Import.java

Example 4: passesKeyRangeFilter

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
/**
 * Checks whether the given scan's rowkey range overlaps with the key range of the current store file.
 *
 * @param scan the scan specification. Used to determine the rowkey range.
 * @return true if there is overlap, false otherwise
 */
public boolean passesKeyRangeFilter(Scan scan) {
  if (this.getFirstKey() == null || this.getLastKey() == null) {
    // the file is empty
    return false;
  }
  if (Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) && Bytes
      .equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) {
    return true;
  }
  KeyValue smallestScanKeyValue = scan.isReversed() ?
      KeyValueUtil.createFirstOnRow(scan.getStopRow()) :
      KeyValueUtil.createFirstOnRow(scan.getStartRow());
  KeyValue largestScanKeyValue = scan.isReversed() ?
      KeyValueUtil.createLastOnRow(scan.getStartRow()) :
      KeyValueUtil.createLastOnRow(scan.getStopRow());
  boolean nonOverLapping =
      (getComparator().compareFlatKey(this.getFirstKey(), largestScanKeyValue.getKey()) > 0
          && !Bytes.equals(scan.isReversed() ? scan.getStartRow() : scan.getStopRow(),
          HConstants.EMPTY_END_ROW))
          || getComparator().compareFlatKey(this.getLastKey(), smallestScanKeyValue.getKey())
          < 0;
  return !nonOverLapping;
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: StoreFile.java
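The nonOverLapping expression above is, at its core, the standard closed-interval disjointness test, obscured slightly by the empty-row sentinels and the reversed-scan handling. A minimal sketch of the underlying check, with plain longs standing in for flat keys (class and method names are illustrative):

public class RangeOverlapSketch {
  // Closed ranges [fileFirst, fileLast] and [scanMin, scanMax] are disjoint
  // iff the file starts after the scan ends or ends before the scan starts.
  static boolean overlaps(long fileFirst, long fileLast, long scanMin, long scanMax) {
    boolean nonOverlapping = fileFirst > scanMax || fileLast < scanMin;
    return !nonOverlapping;
  }

  public static void main(String[] args) {
    System.out.println(overlaps(5, 10, 8, 20));  // true: [5,10] and [8,20] share [8,10]
    System.out.println(overlaps(5, 10, 11, 20)); // false: disjoint
  }
}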

Example 5: write

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
@Override
public void write(DataOutput out) throws IOException {
  LOG.warn("WALEdit is being serialized to writable - only expected in test code");
  out.writeInt(VERSION_2);
  out.writeInt(cells.size());
  // We interleave the two lists for code simplicity
  for (Cell cell : cells) {
    // This is not used in any of the core code flows so it is just fine to convert to KV
    KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
    if (compressionContext != null) {
      KeyValueCompression.writeKV(out, kv, compressionContext);
    } else {
      KeyValue.write(kv, out);
    }
  }
  if (scopes == null) {
    out.writeInt(0);
  } else {
    out.writeInt(scopes.size());
    for (byte[] key : scopes.keySet()) {
      Bytes.writeByteArray(out, key);
      out.writeInt(scopes.get(key));
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: WALEdit.java

Example 6: seekToLastRow

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
@Override
public synchronized boolean seekToLastRow() {
  Cell first = cellSetAtCreation.isEmpty() ? null : cellSetAtCreation
      .last();
  Cell second = snapshotAtCreation.isEmpty() ? null
      : snapshotAtCreation.last();
  Cell higherCell = getHighest(first, second);
  if (higherCell == null) {
    return false;
  }
  Cell firstCellOnLastRow = KeyValueUtil.createFirstOnRow(higherCell.getRowArray(),
      higherCell.getRowOffset(), higherCell.getRowLength());
  if (seek(firstCellOnLastRow)) {
    return true;
  } else {
    return seekToPreviousRow(higherCell);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: DefaultMemStore.java

Example 7: joinedHeapMayHaveData

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
/**
 * @param currentRow the row key of the current row
 * @param offset offset of the row key within the currentRow array
 * @param length length of the row key
 * @return true when the joined heap may have data for the current row
 * @throws IOException
 */
private boolean joinedHeapMayHaveData(byte[] currentRow, int offset, short length)
    throws IOException {
  Cell nextJoinedKv = joinedHeap.peek();
  boolean matchCurrentRow =
      nextJoinedKv != null && CellUtil.matchingRow(nextJoinedKv, currentRow, offset, length);
  boolean matchAfterSeek = false;

  // If the next value in the joined heap does not match the current row,
  // try to seek to the
  // correct row
  if (!matchCurrentRow) {
    Cell firstOnCurrentRow = KeyValueUtil.createFirstOnRow(currentRow, offset, length);
    boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true);
    matchAfterSeek = seekSuccessful && joinedHeap.peek() != null && CellUtil
        .matchingRow(joinedHeap.peek(), currentRow, offset, length);
  }

  return matchCurrentRow || matchAfterSeek;
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: HRegion.java

Example 8: reseek

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
@Override public synchronized boolean reseek(byte[] row) throws IOException {
  if (row == null) {
    throw new IllegalArgumentException("Row cannot be null.");
  }
  boolean result = false;
  startRegionOperation();
  KeyValue kv = KeyValueUtil.createFirstOnRow(row);
  try {
    // use request seek to make use of the lazy seek option. See HBASE-5520
    result = this.storeHeap.requestSeek(kv, true, true);
    if (this.joinedHeap != null) {
      result = this.joinedHeap.requestSeek(kv, true, true) || result;
    }
  } catch (FileNotFoundException e) {
    throw handleFileNotFound(e);
  } finally {
    closeRegionOperation();
  }
  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: HRegion.java

Example 9: encode

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
@Override
public int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
    throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();

  out.writeInt(klength);
  out.writeInt(vlength);
  CellUtil.writeFlatKey(cell, out);
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int encodedKvSize = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  // Write the additional tag into the stream
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    out.writeShort(tagsLength);
    if (tagsLength > 0) {
      out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
    }
    encodedKvSize += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    WritableUtils.writeVLong(out, cell.getSequenceId());
    encodedKvSize += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return encodedKvSize;
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: NoOpDataBlockEncoder.java
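The encoder above writes the plain flat layout <keylen:4><valuelen:4><key:keylen><value:valuelen>, optionally followed by tags and the MVCC sequence id. The framing itself needs nothing HBase-specific; the sketch below reproduces it with java.io only (the class name and byte contents are illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class FlatKvLayoutDemo {
  public static void main(String[] args) throws IOException {
    byte[] key = "row/f:q".getBytes(StandardCharsets.UTF_8);
    byte[] value = "val".getBytes(StandardCharsets.UTF_8);

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeInt(key.length);   // <keylen:4>
    out.writeInt(value.length); // <valuelen:4>
    out.write(key);             // <key:keylen>
    out.write(value);           // <value:valuelen>

    // KEYVALUE_INFRASTRUCTURE_SIZE in the example is exactly these two
    // 4-byte length prefixes.
    int expected = key.length + value.length + 2 * Integer.BYTES;
    System.out.println(buf.size() == expected); // true
  }
}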

Example 10: assertScannerResults

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
private void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected)
    throws IOException {
  scanner.seek(KeyValueUtil.createFirstOnRow(new byte[]{}));
  List<Cell> returned = Lists.newArrayList();

  while (true) {
    Cell next = scanner.next();
    if (next == null) break;
    returned.add(next);
  }

  assertTrue(
      "Got:\n" + Joiner.on("\n").join(returned) +
      "\nExpected:\n" + Joiner.on("\n").join(expected),
      Iterables.elementsEqual(Arrays.asList(expected), returned));
  assertNull(scanner.peek());
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TestDefaultMemStore.java

Example 11: testReseek

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
/**
 * Test for HBASE-8012
 */
public void testReseek() throws Exception {
  // write the file
  Path f = new Path(ROOT_DIR, getName());
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
          .withFilePath(f)
          .withFileContext(meta)
          .build();

  writeStoreFile(writer);
  writer.close();

  StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);

  // Now do reseek with empty KV to position to the beginning of the file

  KeyValue k = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY);
  StoreFileScanner s = reader.getStoreFileScanner(false, false);
  s.reseek(k);

  assertNotNull("Intial reseek should position at the beginning of the file", s.peek());
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestStoreFile.java

Example 12: testTransformMPO

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
/**
 * Tests the behavior of transform() in a hierarchical filter list.
 *
 * transform() only applies after a filterKeyValue() whose return code includes the KeyValue;
 * note that MUST_PASS_ALL (AND) sub-lists are evaluated lazily.
 */
@Test
public void testTransformMPO() throws Exception {
  // Apply the following filter:
  //     (family=fam AND qualifier=qual1 AND KeyOnlyFilter)
  //  OR (family=fam AND qualifier=qual2)
  final FilterList flist = new FilterList(Operator.MUST_PASS_ONE, Lists.<Filter>newArrayList(
      new FilterList(Operator.MUST_PASS_ALL, Lists.<Filter>newArrayList(
          new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("fam"))),
          new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("qual1"))),
          new KeyOnlyFilter())),
      new FilterList(Operator.MUST_PASS_ALL, Lists.<Filter>newArrayList(
          new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("fam"))),
          new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("qual2")))))));

  final KeyValue kvQual1 = new KeyValue(
      Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("qual1"), Bytes.toBytes("value"));
  final KeyValue kvQual2 = new KeyValue(
      Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("qual2"), Bytes.toBytes("value"));
  final KeyValue kvQual3 = new KeyValue(
      Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("qual3"), Bytes.toBytes("value"));

  // Value for fam:qual1 should be stripped:
  assertEquals(Filter.ReturnCode.INCLUDE, flist.filterKeyValue(kvQual1));
  final KeyValue transformedQual1 = KeyValueUtil.ensureKeyValue(flist.transform(kvQual1));
  assertEquals(0, transformedQual1.getValue().length);

  // Value for fam:qual2 should not be stripped:
  assertEquals(Filter.ReturnCode.INCLUDE, flist.filterKeyValue(kvQual2));
  final KeyValue transformedQual2 = KeyValueUtil.ensureKeyValue(flist.transform(kvQual2));
  assertEquals("value", Bytes.toString(transformedQual2.getValue()));

  // Other keys should be skipped:
  assertEquals(Filter.ReturnCode.SKIP, flist.filterKeyValue(kvQual3));
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: TestFilterList.java

Example 13: binarySearch

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
protected int binarySearch(final Cell [] kvs,
                           final byte [] family,
                           final byte [] qualifier) {
  Cell searchTerm =
      KeyValueUtil.createFirstOnRow(CellUtil.cloneRow(kvs[0]),
          family, qualifier);

  // pos == (-(insertion point) - 1)
  int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR);
  // the fake "first on row" search term never matches an actual cell exactly
  if (pos < 0) {
    pos = (pos + 1) * -1;
    // pos is now the insertion point
  }
  if (pos == kvs.length) {
    return -1; // doesn't exist
  }
  return pos;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: Result.java
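The (pos + 1) * -1 step above simply decodes the contract of Arrays.binarySearch, which returns -(insertionPoint) - 1 when the key is absent. A standalone illustration with plain ints (the class name is illustrative):

import java.util.Arrays;

public class InsertionPointDemo {
  public static void main(String[] args) {
    int[] sorted = {10, 20, 30};
    // For a missing key, binarySearch returns -(insertionPoint) - 1.
    int pos = Arrays.binarySearch(sorted, 25); // -3
    int insertionPoint = (pos + 1) * -1;       // 2: index of the first element > 25
    System.out.println(pos + " -> " + insertionPoint);
    // An insertion point equal to the array length means "past the end",
    // which is the pos == kvs.length case the example maps to -1.
  }
}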

Example 14: createKeyOnlyCell

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
private Cell createKeyOnlyCell(Cell c) {
  // KV format: <keylen:4><valuelen:4><key:keylen><value:valuelen>
  // Rebuild as: <keylen:4><0:4><key:keylen>
  int dataLen = lenAsVal ? Bytes.SIZEOF_INT : 0;
  int keyOffset = (2 * Bytes.SIZEOF_INT);
  int keyLen = KeyValueUtil.keyLength(c);
  byte[] newBuffer = new byte[keyLen + keyOffset + dataLen];
  Bytes.putInt(newBuffer, 0, keyLen);
  Bytes.putInt(newBuffer, Bytes.SIZEOF_INT, dataLen);
  KeyValueUtil.appendKeyTo(c, newBuffer, keyOffset);
  if (lenAsVal) {
    Bytes.putInt(newBuffer, newBuffer.length - dataLen, c.getValueLength());
  }
  return new KeyValue(newBuffer);
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: KeyOnlyFilter.java
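The rebuild above packs <keylen:4><0:4><key:keylen> into a fresh buffer. Here is a worked instance of the same offset arithmetic using only java.nio, assuming lenAsVal == false; the key bytes and class name are hypothetical sample values:

import java.nio.ByteBuffer;

public class KeyOnlyLayoutDemo {
  public static void main(String[] args) {
    byte[] key = {1, 2, 3, 4, 5};      // stand-in for the flat key bytes
    int keyOffset = 2 * Integer.BYTES; // skip <keylen:4><valuelen:4>
    byte[] newBuffer = new byte[keyOffset + key.length]; // dataLen == 0

    ByteBuffer bb = ByteBuffer.wrap(newBuffer); // big-endian, like Bytes.putInt
    bb.putInt(0, key.length); // <keylen:4>
    bb.putInt(4, 0);          // <0:4> -- the value length is zero
    System.arraycopy(key, 0, newBuffer, keyOffset, key.length);

    System.out.println(newBuffer.length); // 13 = 4 + 4 + 5
  }
}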

Example 15: testScanBackwards

import org.apache.hadoop.hbase.KeyValueUtil; // import the required package/class
@Test
public void testScanBackwards() throws IOException {
  CellSearcher searcher = null;
  try {
    searcher = DecoderFactory.checkOut(block, true);
    searcher.positionAfterLastCell();
    int i = -1;
    while (searcher.previous()) {
      ++i;
      int oppositeIndex = rows.getInputs().size() - i - 1;
      KeyValue inputKv = rows.getInputs().get(oppositeIndex);
      KeyValue outputKv = KeyValueUtil.copyToNewKeyValue(searcher.current());
      Assert.assertEquals(inputKv, outputKv);
    }
    Assert.assertEquals(rows.getInputs().size(), i + 1);
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestPrefixTreeSearcher.java


Note: The org.apache.hadoop.hbase.KeyValueUtil class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and redistribution or use should follow the License of the corresponding project. Do not reproduce without permission.