

Java KeyValue.setSequenceId Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.setSequenceId. If you are wondering what KeyValue.setSequenceId does or how to use it, the curated examples below should help. You can also explore other usage examples of the org.apache.hadoop.hbase.KeyValue class.


Twelve code examples of the KeyValue.setSequenceId method follow, sorted by popularity by default.
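Before the examples, here is a minimal sketch of the pattern most of them follow, assuming the HBase 1.x-era APIs used below (DefaultMemStore, MultiVersionConcurrencyControl); the helper name addCell is hypothetical. A cell is stamped with an MVCC write number via setSequenceId before it is added to the memstore, and the write is then completed so readers at the current read point can see it.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.DefaultMemStore;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch (hypothetical helper, not taken from the examples below):
// stamp a cell with an MVCC write number, add it, then complete the write.
static void addCell(DefaultMemStore memstore, MultiVersionConcurrencyControl mvcc) {
  MultiVersionConcurrencyControl.WriteEntry w = mvcc.begin();
  KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), Bytes.toBytes("v"));
  kv.setSequenceId(w.getWriteNumber()); // visible only to readers at or past this MVCC point
  memstore.add(kv);
  mvcc.completeAndWait(w);              // advance the read point past this write
}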

Example 1: internalRun

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
private void internalRun() throws IOException {
  for (long i = 0; i < NUM_TRIES && caughtException.get() == null; i++) {
    MultiVersionConcurrencyControl.WriteEntry w =
        mvcc.begin();

    // Insert the sequence value (i)
    byte[] v = Bytes.toBytes(i);

    KeyValue kv = new KeyValue(row, f, q1, i, v);
    kv.setSequenceId(w.getWriteNumber());
    memstore.add(kv);
    mvcc.completeAndWait(w);

    // Assert that we can read back
    KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
    s.seek(kv);

    Cell ret = s.next();
    assertNotNull("Didnt find own write at all", ret);
    assertEquals("Didnt read own writes",
                 kv.getTimestamp(), ret.getTimestamp());
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestDefaultMemStore.java

Example 2: testOne

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(dos);
  final KeyValue kv =
    new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  kv.setSequenceId(Long.MAX_VALUE);
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  CountingInputStream cis =
    new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = codec.getDecoder(dis);
  assertTrue(decoder.advance()); // First read should pull in the KV
  // Second read should trip over the end-of-stream marker and return false
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(offset, cis.getCount());
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestCellCodec.java

Example 3: getKeyValue

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
@Override
public Cell getKeyValue() {
  if (!isSeeked())
    return null;
  if (currTagsLen > 0) {
    KeyValue ret = new KeyValue(blockBuffer.array(), blockBuffer.arrayOffset()
        + blockBuffer.position(), getCellBufSize());
    if (this.reader.shouldIncludeMemstoreTS()) {
      ret.setSequenceId(currMemstoreTS);
    }
    return ret;
  } else {
    return formNoTagsKeyValue();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: HFileReaderV3.java

Example 4: writeToHFile

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
private Path writeToHFile(long l, String hFilePath, String pathStr, boolean nativeHFile)
    throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  final Path hfilePath = new Path(hFilePath);
  fs.mkdirs(hfilePath);
  Path path = new Path(pathStr);
  HFile.WriterFactory wf = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  Assert.assertNotNull(wf);
  HFileContext context = new HFileContext();
  HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create();
  KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
      Bytes.toBytes("version2"));

  // Set cell seq id to test bulk load native hfiles.
  if (nativeHFile) {
    // Set a big seq id. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip
    // this kv.
    kv.setSequenceId(9999999);
  }

  writer.append(kv);

  if (nativeHFile) {
    // Set a big MAX_SEQ_ID_KEY. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip its
    // kv.
    writer.appendFileInfo(StoreFile.MAX_SEQ_ID_KEY, Bytes.toBytes(9999999L));
  } else {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
  }
  writer.close();
  return hfilePath;
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: TestScannerWithBulkload.java

Example 5: testMemstoreConcurrentControl

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
public void testMemstoreConcurrentControl() throws IOException {
  final byte[] row = Bytes.toBytes(1);
  final byte[] f = Bytes.toBytes("family");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] v = Bytes.toBytes("value");

  MultiVersionConcurrencyControl.WriteEntry w =
      mvcc.begin();

  KeyValue kv1 = new KeyValue(row, f, q1, v);
  kv1.setSequenceId(w.getWriteNumber());
  memstore.add(kv1);

  KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{});

  mvcc.completeAndWait(w);

  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv1});

  w = mvcc.begin();
  KeyValue kv2 = new KeyValue(row, f, q2, v);
  kv2.setSequenceId(w.getWriteNumber());
  memstore.add(kv2);

  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv1});

  mvcc.completeAndWait(w);

  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv1, kv2});
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: TestDefaultMemStore.java

Example 6: testUpsertMemstoreSize

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
/**
 * Adds keyvalues with a fixed memstoreTS and checks that the memstore size
 * is decreased as older keyvalues are deleted from the memstore.
 * @throws Exception
 */
public void testUpsertMemstoreSize() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  memstore = new DefaultMemStore(conf, KeyValue.COMPARATOR);
  long oldSize = memstore.size.get();

  List<Cell> l = new ArrayList<Cell>();
  KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
  KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v");
  KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v");

  kv1.setSequenceId(1); kv2.setSequenceId(1); kv3.setSequenceId(1);
  l.add(kv1); l.add(kv2); l.add(kv3);

  this.memstore.upsert(l, 2); // readpoint is 2
  long newSize = this.memstore.size.get();
  assert(newSize > oldSize);
  // kv1 should be removed.
  assert(memstore.cellSet.size() == 2);

  KeyValue kv4 = KeyValueTestUtil.create("r", "f", "q", 104, "v");
  kv4.setSequenceId(1);
  l.clear(); l.add(kv4);
  this.memstore.upsert(l, 3);
  assertEquals(newSize, this.memstore.size.get());
  // kv2 should be removed.
  assert(memstore.cellSet.size() == 2);
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: TestDefaultMemStore.java

Example 7: testUpdateToTimeOfOldestEdit

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
/**
 * Tests that the timeOfOldestEdit is updated correctly for the 
 * various edit operations in memstore.
 * @throws Exception
 */
public void testUpdateToTimeOfOldestEdit() throws Exception {
  try {
    EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
    EnvironmentEdgeManager.injectEdge(edge);
    DefaultMemStore memstore = new DefaultMemStore();
    long t = memstore.timeOfOldestEdit();
    assertEquals(t, Long.MAX_VALUE);

    // test the case that the timeOfOldestEdit is updated after a KV add
    memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"));
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    // snapshot() will reset timeOfOldestEdit. The method will also assert the 
    // value is reset to Long.MAX_VALUE
    t = runSnapshot(memstore);

    // test the case that the timeOfOldestEdit is updated after a KV delete
    memstore.delete(KeyValueTestUtil.create("r", "f", "q", 100, "v"));
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    t = runSnapshot(memstore);

    // test the case that the timeOfOldestEdit is updated after a KV upsert
    List<Cell> l = new ArrayList<Cell>();
    KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
    kv1.setSequenceId(100);
    l.add(kv1);
    memstore.upsert(l, 1000);
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: TestDefaultMemStore.java

Example 8: readCell

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
static KeyValue readCell(PositionedByteRange pbr) throws Exception {
  int kvStartPos = pbr.getPosition();
  int keyLen = pbr.getInt();
  int valLen = pbr.getInt();
  pbr.setPosition(pbr.getPosition() + keyLen + valLen); // Skip the key and value section
  int tagsLen = ((pbr.get() & 0xff) << 8) ^ (pbr.get() & 0xff);
  pbr.setPosition(pbr.getPosition() + tagsLen); // Skip the tags section
  long mvcc = pbr.getVLong();
  KeyValue kv = new KeyValue(pbr.getBytes(), kvStartPos,
      (int) KeyValue.getKeyValueDataStructureSize(keyLen, valLen, tagsLen));
  kv.setSequenceId(mvcc);
  return kv;
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: TestByteRangeWithKVSerialization.java
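For context, readCell above assumes a flat layout: the KeyValue's backing bytes (4-byte key length, 4-byte value length, key, value, 2-byte tags length, tags — i.e. a cell that carries tags) followed by the sequence id encoded as a VLong. A hypothetical writer sketch for that layout (writeCell is an assumed name; put and putVLong are PositionedByteRange methods):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.PositionedByteRange;

// Hypothetical counterpart to readCell: copy the KeyValue's backing bytes
// as-is, then append the sequence id as a VLong, matching what readCell parses.
static void writeCell(PositionedByteRange pbr, KeyValue kv) throws Exception {
  pbr.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
  pbr.putVLong(kv.getSequenceId());
}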

Example 9: testMemstoreEditsVisibilityWithSameKey

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
/**
 * Regression test for HBASE-2616, HBASE-2670.
 * When we insert a higher-memstoreTS version of a cell but with
 * the same timestamp, we still need to provide consistent reads
 * for the same scanner.
 */
public void testMemstoreEditsVisibilityWithSameKey() throws IOException {
  final byte[] row = Bytes.toBytes(1);
  final byte[] f = Bytes.toBytes("family");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] v1 = Bytes.toBytes("value1");
  final byte[] v2 = Bytes.toBytes("value2");

  // INSERT 1: Write both columns val1
  MultiVersionConcurrencyControl.WriteEntry w =
      mvcc.begin();

  KeyValue kv11 = new KeyValue(row, f, q1, v1);
  kv11.setSequenceId(w.getWriteNumber());
  memstore.add(kv11);

  KeyValue kv12 = new KeyValue(row, f, q2, v1);
  kv12.setSequenceId(w.getWriteNumber());
  memstore.add(kv12);
  mvcc.completeAndWait(w);

  // BEFORE STARTING INSERT 2, SEE FIRST KVS
  KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // START INSERT 2: Write both columns val2
  w = mvcc.begin();
  KeyValue kv21 = new KeyValue(row, f, q1, v2);
  kv21.setSequenceId(w.getWriteNumber());
  memstore.add(kv21);

  KeyValue kv22 = new KeyValue(row, f, q2, v2);
  kv22.setSequenceId(w.getWriteNumber());
  memstore.add(kv22);

  // BEFORE COMPLETING INSERT 2, SEE FIRST KVS
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // COMPLETE INSERT 2
  mvcc.completeAndWait(w);

  // NOW SHOULD SEE NEW KVS IN ADDITION TO OLD KVS.
  // See HBASE-1485 for discussion about what we should do with
  // the duplicate-TS inserts
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv21, kv11, kv22, kv12});
}
 
Developer: fengchen8086, Project: ditb, Lines: 55, Source: TestDefaultMemStore.java

Example 10: testMemstoreDeletesVisibilityWithSameKey

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
/**
 * When we insert a higher-memstoreTS deletion of a cell but with
 * the same timestamp, we still need to provide consistent reads
 * for the same scanner.
 */
public void testMemstoreDeletesVisibilityWithSameKey() throws IOException {
  final byte[] row = Bytes.toBytes(1);
  final byte[] f = Bytes.toBytes("family");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] v1 = Bytes.toBytes("value1");
  // INSERT 1: Write both columns val1
  MultiVersionConcurrencyControl.WriteEntry w =
      mvcc.begin();

  KeyValue kv11 = new KeyValue(row, f, q1, v1);
  kv11.setSequenceId(w.getWriteNumber());
  memstore.add(kv11);

  KeyValue kv12 = new KeyValue(row, f, q2, v1);
  kv12.setSequenceId(w.getWriteNumber());
  memstore.add(kv12);
  mvcc.completeAndWait(w);

  // BEFORE STARTING INSERT 2, SEE FIRST KVS
  KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // START DELETE: Insert delete for one of the columns
  w = mvcc.begin();
  KeyValue kvDel = new KeyValue(row, f, q2, kv11.getTimestamp(),
      KeyValue.Type.DeleteColumn);
  kvDel.setSequenceId(w.getWriteNumber());
  memstore.add(kvDel);

  // BEFORE COMPLETING DELETE, SEE FIRST KVS
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // COMPLETE DELETE
  mvcc.completeAndWait(w);

  // NOW WE SHOULD SEE DELETE
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kvDel, kv12});
}
 
Developer: fengchen8086, Project: ditb, Lines: 47, Source: TestDefaultMemStore.java

Example 11: writeTestKeyValues

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
static int writeTestKeyValues(HFileBlock.Writer hbw, int seed, boolean includesMemstoreTS,
    boolean useTag) throws IOException {
  List<KeyValue> keyValues = new ArrayList<KeyValue>();
  Random randomizer = new Random(42L + seed); // just any fixed number

  // generate keyValues
  for (int i = 0; i < NUM_KEYVALUES; ++i) {
    byte[] row;
    long timestamp;
    byte[] family;
    byte[] qualifier;
    byte[] value;

    // generate it or repeat, it should compress well
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      row = keyValues.get(randomizer.nextInt(keyValues.size())).getRow();
    } else {
      row = new byte[FIELD_LENGTH];
      randomizer.nextBytes(row);
    }
    if (0 == i) {
      family = new byte[FIELD_LENGTH];
      randomizer.nextBytes(family);
    } else {
      family = keyValues.get(0).getFamily();
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      qualifier = keyValues.get(
          randomizer.nextInt(keyValues.size())).getQualifier();
    } else {
      qualifier = new byte[FIELD_LENGTH];
      randomizer.nextBytes(qualifier);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      value = keyValues.get(randomizer.nextInt(keyValues.size())).getValue();
    } else {
      value = new byte[FIELD_LENGTH];
      randomizer.nextBytes(value);
    }
    if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
      timestamp = keyValues.get(
          randomizer.nextInt(keyValues.size())).getTimestamp();
    } else {
      timestamp = randomizer.nextLong();
    }
    if (!useTag) {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value));
    } else {
      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value, new Tag[] { new Tag(
          (byte) 1, Bytes.toBytes("myTagVal")) }));
    }
  }

  // sort it and write to stream
  int totalSize = 0;
  Collections.sort(keyValues, KeyValue.COMPARATOR);

  for (KeyValue kv : keyValues) {
    totalSize += kv.getLength();
    if (includesMemstoreTS) {
      long memstoreTS = randomizer.nextLong();
      kv.setSequenceId(memstoreTS);
      totalSize += WritableUtils.getVIntSize(memstoreTS);
    }
    hbw.write(kv);
  }
  return totalSize;
}
 
Developer: fengchen8086, Project: ditb, Lines: 69, Source: TestHFileBlock.java

Example 12: getIterator

import org.apache.hadoop.hbase.KeyValue; // import the class the method depends on
/**
 * Provides access to the compressed (encoded) cells.
 * @param headerSize header size of the block.
 * @return a forward, sequential iterator over the cells.
 */
public Iterator<Cell> getIterator(int headerSize) {
  final int rawSize = rawKVs.length;
  byte[] encodedDataWithHeader = getEncodedData();
  int bytesToSkip = headerSize + Bytes.SIZEOF_SHORT;
  ByteArrayInputStream bais = new ByteArrayInputStream(encodedDataWithHeader,
      bytesToSkip, encodedDataWithHeader.length - bytesToSkip);
  final DataInputStream dis = new DataInputStream(bais);

  return new Iterator<Cell>() {
    private ByteBuffer decompressedData = null;

    @Override
    public boolean hasNext() {
      if (decompressedData == null) {
        return rawSize > 0;
      }
      return decompressedData.hasRemaining();
    }

    @Override
    public Cell next() {
      if (decompressedData == null) {
        try {
          decompressedData = dataBlockEncoder.decodeKeyValues(dis, dataBlockEncoder
              .newDataBlockDecodingContext(meta));
        } catch (IOException e) {
          throw new RuntimeException("Problem with data block encoder, " +
              "most likely it requested more bytes than are available.", e);
        }
        decompressedData.rewind();
      }
      int offset = decompressedData.position();
      int klen = decompressedData.getInt();
      int vlen = decompressedData.getInt();
      int tagsLen = 0;
      ByteBufferUtils.skip(decompressedData, klen + vlen);
      // Read the tags length when the stream contains tags
      if (meta.isIncludesTags()) {
        tagsLen = ((decompressedData.get() & 0xff) << 8) ^ (decompressedData.get() & 0xff);
        ByteBufferUtils.skip(decompressedData, tagsLen);
      }
      KeyValue kv = new KeyValue(decompressedData.array(), offset,
          (int) KeyValue.getKeyValueDataStructureSize(klen, vlen, tagsLen));
      if (meta.isIncludesMvcc()) {
        long mvccVersion = ByteBufferUtils.readVLong(decompressedData);
        kv.setSequenceId(mvccVersion);
      }
      return kv;
    }

    @Override
    public void remove() {
      throw new NotImplementedException("remove() is not supported!");
    }

    @Override
    public String toString() {
      return "Iterator of: " + dataBlockEncoder.getClass().getName();
    }

  };
}
 
Developer: fengchen8086, Project: ditb, Lines: 68, Source: EncodedDataBlock.java


Note: The org.apache.hadoop.hbase.KeyValue.setSequenceId examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please refer to each project's license before distributing or using the code. Do not reproduce without permission.