

Java KeyValue.setSequenceId Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.setSequenceId. If you are wondering what KeyValue.setSequenceId does, how to call it, or what real-world usages look like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.KeyValue.


The sections below present 12 code examples of the KeyValue.setSequenceId method, sorted by popularity by default.
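Before working through the examples, here is a minimal, self-contained sketch of the basic call pattern (assuming an HBase 1.x client on the classpath; the class name SetSequenceIdSketch, the row/family/qualifier literals, and the sequence id 42L are illustrative placeholders, not taken from any project below):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class SetSequenceIdSketch {
  public static void main(String[] args) {
    // Build a KeyValue for row "r", family "f", qualifier "q", value "v".
    KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));

    // Stamp the cell with an MVCC sequence id (an arbitrary value here; in a
    // region server this would come from a WriteEntry's write number, as the
    // examples below show).
    kv.setSequenceId(42L);

    System.out.println("sequence id = " + kv.getSequenceId());
  }
}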

Example 1: internalRun

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
private void internalRun() throws IOException {
  for (long i = 0; i < NUM_TRIES && caughtException.get() == null; i++) {
    MultiVersionConcurrencyControl.WriteEntry w =
        mvcc.begin();

    // Insert the sequence value (i)
    byte[] v = Bytes.toBytes(i);

    KeyValue kv = new KeyValue(row, f, q1, i, v);
    kv.setSequenceId(w.getWriteNumber());
    memstore.add(kv);
    mvcc.completeAndWait(w);

    // Assert that we can read back
    KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
    s.seek(kv);

    Cell ret = s.next();
    assertNotNull("Didnt find own write at all", ret);
    assertEquals("Didnt read own writes",
                 kv.getTimestamp(), ret.getTimestamp());
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 24, Source: TestDefaultMemStore.java
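The sequence in this example — obtain a WriteEntry from mvcc.begin(), stamp the KeyValue with its write number via setSequenceId, add it to the memstore, then call mvcc.completeAndWait(w) — is the standard MVCC write pattern: the edit becomes visible to scanners only once the write entry completes. Several of the later examples repeat the same pattern.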

Example 2: testOne

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(dos);
  final KeyValue kv =
    new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  kv.setSequenceId(Long.MAX_VALUE);
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  CountingInputStream cis =
    new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = codec.getDecoder(dis);
  assertTrue(decoder.advance()); // First read should pull in the KV
  // Second read should trip over the end-of-stream marker and return false
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(offset, cis.getCount());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source: TestCellCodec.java

Example 3: getKeyValue

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
@Override
public Cell getKeyValue() {
  if (!isSeeked())
    return null;
  if (currTagsLen > 0) {
    KeyValue ret = new KeyValue(blockBuffer.array(), blockBuffer.arrayOffset()
        + blockBuffer.position(), getCellBufSize());
    if (this.reader.shouldIncludeMemstoreTS()) {
      ret.setSequenceId(currMemstoreTS);
    }
    return ret;
  } else {
    return formNoTagsKeyValue();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 16, Source: HFileReaderV3.java

Example 4: writeToHFile

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
private Path writeToHFile(long l, String hFilePath, String pathStr, boolean nativeHFile)
    throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  final Path hfilePath = new Path(hFilePath);
  fs.mkdirs(hfilePath);
  Path path = new Path(pathStr);
  HFile.WriterFactory wf = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  Assert.assertNotNull(wf);
  HFileContext context = new HFileContext();
  HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create();
  KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
      Bytes.toBytes("version2"));

  // Set cell seq id to test bulk load native hfiles.
  if (nativeHFile) {
    // Set a big seq id. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip
    // this kv.
    kv.setSequenceId(9999999);
  }

  writer.append(kv);

  if (nativeHFile) {
    // Set a big MAX_SEQ_ID_KEY. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip its
    // kv.
    writer.appendFileInfo(StoreFile.MAX_SEQ_ID_KEY, Bytes.toBytes(9999999L));
  } else {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
        Bytes.toBytes(System.currentTimeMillis()));
  }
  writer.close();
  return hfilePath;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 36, Source: TestScannerWithBulkload.java
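As the inline comments note, a scan over a bulk-loaded file is expected to ignore any sequence id carried by the cell itself (the deliberately large 9999999 set via setSequenceId) and to use the sequence id assigned at bulk-load time instead; that is why the test writes MAX_SEQ_ID_KEY into the file info for a native HFile and BULKLOAD_TIME_KEY otherwise.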

Example 5: testMemstoreConcurrentControl

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
public void testMemstoreConcurrentControl() throws IOException {
  final byte[] row = Bytes.toBytes(1);
  final byte[] f = Bytes.toBytes("family");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] v = Bytes.toBytes("value");

  MultiVersionConcurrencyControl.WriteEntry w =
      mvcc.begin();

  KeyValue kv1 = new KeyValue(row, f, q1, v);
  kv1.setSequenceId(w.getWriteNumber());
  memstore.add(kv1);

  KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{});

  mvcc.completeAndWait(w);

  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv1});

  w = mvcc.begin();
  KeyValue kv2 = new KeyValue(row, f, q2, v);
  kv2.setSequenceId(w.getWriteNumber());
  memstore.add(kv2);

  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv1});

  mvcc.completeAndWait(w);

  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv1, kv2});
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 36, Source: TestDefaultMemStore.java

Example 6: testUpsertMemstoreSize

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Add keyvalues with a fixed memstoreTs, and checks that memstore size is decreased
 * as older keyvalues are deleted from the memstore.
 * @throws Exception
 */
public void testUpsertMemstoreSize() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  memstore = new DefaultMemStore(conf, KeyValue.COMPARATOR);
  long oldSize = memstore.size.get();

  List<Cell> l = new ArrayList<Cell>();
  KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
  KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v");
  KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v");

  kv1.setSequenceId(1);
  kv2.setSequenceId(1);
  kv3.setSequenceId(1);
  l.add(kv1);
  l.add(kv2);
  l.add(kv3);

  this.memstore.upsert(l, 2); // readpoint is 2
  long newSize = this.memstore.size.get();
  assert (newSize > oldSize);
  // The kv1 should be removed.
  assert (memstore.cellSet.size() == 2);

  KeyValue kv4 = KeyValueTestUtil.create("r", "f", "q", 104, "v");
  kv4.setSequenceId(1);
  l.clear();
  l.add(kv4);
  this.memstore.upsert(l, 3);
  assertEquals(newSize, this.memstore.size.get());
  // The kv2 should be removed.
  assert (memstore.cellSet.size() == 2);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source: TestDefaultMemStore.java
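The readpoint argument to upsert (2, then 3) marks the oldest version any scanner may still need; older versions of the same row/family/qualifier below it can be pruned in place, which is why cellSet stays at two cells and, on the second upsert, the memstore size no longer grows.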

Example 7: testUpdateToTimeOfOldestEdit

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Tests that the timeOfOldestEdit is updated correctly for the 
 * various edit operations in memstore.
 * @throws Exception
 */
public void testUpdateToTimeOfOldestEdit() throws Exception {
  try {
    EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
    EnvironmentEdgeManager.injectEdge(edge);
    DefaultMemStore memstore = new DefaultMemStore();
    long t = memstore.timeOfOldestEdit();
    assertEquals(t, Long.MAX_VALUE);

    // test the case that the timeOfOldestEdit is updated after a KV add
    memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"));
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    // snapshot() will reset timeOfOldestEdit. The method will also assert the 
    // value is reset to Long.MAX_VALUE
    t = runSnapshot(memstore);

    // test the case that the timeOfOldestEdit is updated after a KV delete
    memstore.delete(KeyValueTestUtil.create("r", "f", "q", 100, "v"));
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    t = runSnapshot(memstore);

    // test the case that the timeOfOldestEdit is updated after a KV upsert
    List<Cell> l = new ArrayList<Cell>();
    KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
    kv1.setSequenceId(100);
    l.add(kv1);
    memstore.upsert(l, 1000);
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 40, Source: TestDefaultMemStore.java

Example 8: readCell

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
static KeyValue readCell(PositionedByteRange pbr) throws Exception {
  int kvStartPos = pbr.getPosition();
  int keyLen = pbr.getInt();
  int valLen = pbr.getInt();
  pbr.setPosition(pbr.getPosition() + keyLen + valLen); // Skip the key and value section
  int tagsLen = ((pbr.get() & 0xff) << 8) ^ (pbr.get() & 0xff);
  pbr.setPosition(pbr.getPosition() + tagsLen); // Skip the tags section
  long mvcc = pbr.getVLong();
  KeyValue kv = new KeyValue(pbr.getBytes(), kvStartPos,
      (int) KeyValue.getKeyValueDataStructureSize(keyLen, valLen, tagsLen));
  kv.setSequenceId(mvcc);
  return kv;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 14, Source: TestByteRangeWithKVSerialization.java
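readCell walks the flat serialized layout of a tagged KeyValue: a 4-byte key length, a 4-byte value length, the key and value bytes, a 2-byte tags length followed by the tags, and finally the memstore timestamp encoded as a variable-length long, which is restored onto the reconstructed KeyValue through setSequenceId.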

Example 9: testMemstoreEditsVisibilityWithSameKey

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Regression test for HBASE-2616, HBASE-2670.
 * When we insert a higher-memstoreTS version of a cell but with
 * the same timestamp, we still need to provide consistent reads
 * for the same scanner.
 */
public void testMemstoreEditsVisibilityWithSameKey() throws IOException {
  final byte[] row = Bytes.toBytes(1);
  final byte[] f = Bytes.toBytes("family");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] v1 = Bytes.toBytes("value1");
  final byte[] v2 = Bytes.toBytes("value2");

  // INSERT 1: Write both columns val1
  MultiVersionConcurrencyControl.WriteEntry w =
      mvcc.begin();

  KeyValue kv11 = new KeyValue(row, f, q1, v1);
  kv11.setSequenceId(w.getWriteNumber());
  memstore.add(kv11);

  KeyValue kv12 = new KeyValue(row, f, q2, v1);
  kv12.setSequenceId(w.getWriteNumber());
  memstore.add(kv12);
  mvcc.completeAndWait(w);

  // BEFORE STARTING INSERT 2, SEE FIRST KVS
  KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // START INSERT 2: Write both columns val2
  w = mvcc.begin();
  KeyValue kv21 = new KeyValue(row, f, q1, v2);
  kv21.setSequenceId(w.getWriteNumber());
  memstore.add(kv21);

  KeyValue kv22 = new KeyValue(row, f, q2, v2);
  kv22.setSequenceId(w.getWriteNumber());
  memstore.add(kv22);

  // BEFORE COMPLETING INSERT 2, SEE FIRST KVS
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // COMPLETE INSERT 2
  mvcc.completeAndWait(w);

  // NOW SHOULD SEE NEW KVS IN ADDITION TO OLD KVS.
  // See HBASE-1485 for discussion about what we should do with
  // the duplicate-TS inserts
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv21, kv11, kv22, kv12});
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 55, Source: TestDefaultMemStore.java

Example 10: testMemstoreDeletesVisibilityWithSameKey

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * When we insert a higher-memstoreTS deletion of a cell but with
 * the same timestamp, we still need to provide consistent reads
 * for the same scanner.
 */
public void testMemstoreDeletesVisibilityWithSameKey() throws IOException {
  final byte[] row = Bytes.toBytes(1);
  final byte[] f = Bytes.toBytes("family");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] v1 = Bytes.toBytes("value1");
  // INSERT 1: Write both columns val1
  MultiVersionConcurrencyControl.WriteEntry w =
      mvcc.begin();

  KeyValue kv11 = new KeyValue(row, f, q1, v1);
  kv11.setSequenceId(w.getWriteNumber());
  memstore.add(kv11);

  KeyValue kv12 = new KeyValue(row, f, q2, v1);
  kv12.setSequenceId(w.getWriteNumber());
  memstore.add(kv12);
  mvcc.completeAndWait(w);

  // BEFORE STARTING INSERT 2, SEE FIRST KVS
  KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // START DELETE: Insert delete for one of the columns
  w = mvcc.begin();
  KeyValue kvDel = new KeyValue(row, f, q2, kv11.getTimestamp(),
      KeyValue.Type.DeleteColumn);
  kvDel.setSequenceId(w.getWriteNumber());
  memstore.add(kvDel);

  // BEFORE COMPLETING DELETE, SEE FIRST KVS
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // COMPLETE DELETE
  mvcc.completeAndWait(w);

  // NOW WE SHOULD SEE DELETE
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kvDel, kv12});
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 47, Source: TestDefaultMemStore.java

Example 11: writeTestKeyValues

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
static int writeTestKeyValues(HFileBlock.Writer hbw, int seed, boolean includesMemstoreTS,
    boolean useTag) throws IOException {
  List<KeyValue> keyValues = new ArrayList<KeyValue>();
  Random randomizer = new Random(42L + seed); // just any fixed number

  // generate keyValues
  for (int i = 0; i < NUM_KEYVALUES; ++i) {
    byte[] row;
    long timestamp;
    byte[] family;
    byte[] qualifier;
    byte[] value;

    // generate it or repeat, it should compress well
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      row = keyValues.get(randomizer.nextInt(keyValues.size())).getRow();
    } else {
      row = new byte[FIELD_LENGTH];
      randomizer.nextBytes(row);
    }
    if (0 == i) {
      family = new byte[FIELD_LENGTH];
      randomizer.nextBytes(family);
    } else {
      family = keyValues.get(0).getFamily();
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      qualifier = keyValues.get(
          randomizer.nextInt(keyValues.size())).getQualifier();
    } else {
      qualifier = new byte[FIELD_LENGTH];
      randomizer.nextBytes(qualifier);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      value = keyValues.get(randomizer.nextInt(keyValues.size())).getValue();
    } else {
      value = new byte[FIELD_LENGTH];
      randomizer.nextBytes(value);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      timestamp = keyValues.get(
          randomizer.nextInt(keyValues.size())).getTimestamp();
    } else {
      timestamp = randomizer.nextLong();
    }
    if (!useTag) {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value));
    } else {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value, new Tag[] { new Tag(
          (byte) 1, Bytes.toBytes("myTagVal")) }));
    }
  }

  // sort it and write to stream
  int totalSize = 0;
  Collections.sort(keyValues, KeyValue.COMPARATOR);

  for (KeyValue kv : keyValues) {
    totalSize += kv.getLength();
    if (includesMemstoreTS) {
      long memstoreTS = randomizer.nextLong();
      kv.setSequenceId(memstoreTS);
      totalSize += WritableUtils.getVIntSize(memstoreTS);
    }
    hbw.write(kv);
  }
  return totalSize;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 69, Source: TestHFileBlock.java

Example 12: getIterator

import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Provides access to the compressed value.
 * @param headerSize header size of the block.
 * @return a forward sequential iterator.
 */
public Iterator<Cell> getIterator(int headerSize) {
  final int rawSize = rawKVs.length;
  byte[] encodedDataWithHeader = getEncodedData();
  int bytesToSkip = headerSize + Bytes.SIZEOF_SHORT;
  ByteArrayInputStream bais = new ByteArrayInputStream(encodedDataWithHeader,
      bytesToSkip, encodedDataWithHeader.length - bytesToSkip);
  final DataInputStream dis = new DataInputStream(bais);

  return new Iterator<Cell>() {
    private ByteBuffer decompressedData = null;

    @Override
    public boolean hasNext() {
      if (decompressedData == null) {
        return rawSize > 0;
      }
      return decompressedData.hasRemaining();
    }

    @Override
    public Cell next() {
      if (decompressedData == null) {
        try {
          decompressedData = dataBlockEncoder.decodeKeyValues(dis, dataBlockEncoder
              .newDataBlockDecodingContext(meta));
        } catch (IOException e) {
          throw new RuntimeException("Problem with data block encoder, " +
              "most likely it requested more bytes than are available.", e);
        }
        decompressedData.rewind();
      }
      int offset = decompressedData.position();
      int klen = decompressedData.getInt();
      int vlen = decompressedData.getInt();
      int tagsLen = 0;
      ByteBufferUtils.skip(decompressedData, klen + vlen);
      // Read the tags length in case the stream contains tags
      if (meta.isIncludesTags()) {
        tagsLen = ((decompressedData.get() & 0xff) << 8) ^ (decompressedData.get() & 0xff);
        ByteBufferUtils.skip(decompressedData, tagsLen);
      }
      KeyValue kv = new KeyValue(decompressedData.array(), offset,
          (int) KeyValue.getKeyValueDataStructureSize(klen, vlen, tagsLen));
      if (meta.isIncludesMvcc()) {
        long mvccVersion = ByteBufferUtils.readVLong(decompressedData);
        kv.setSequenceId(mvccVersion);
      }
      return kv;
    }

    @Override
    public void remove() {
      throw new NotImplementedException("remove() is not supported!");
    }

    @Override
    public String toString() {
      return "Iterator of: " + dataBlockEncoder.getClass().getName();
    }

  };
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 68, Source: EncodedDataBlock.java
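This iterator is the decode-side counterpart of example 11: after skipping the key and value bytes (plus the 2-byte tags length and tags when the block metadata includes tags), it reads the MVCC version as a variable-length long and reattaches it with setSequenceId, but only when meta.isIncludesMvcc() indicates the block was written with memstore timestamps.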


Note: The org.apache.hadoop.hbase.KeyValue.setSequenceId method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult each project's license before distributing or reusing the code; do not reproduce without permission.