This article collects typical usage examples of the Java method org.apache.hadoop.hbase.Cell.getSequenceId. If you have been wondering what Cell.getSequenceId does, how to use it, or where to find examples of it, the curated method examples below may help. You can also read further about the enclosing class, org.apache.hadoop.hbase.Cell.
The following shows 5 code examples of Cell.getSequenceId, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
Example 1: rollback
import org.apache.hadoop.hbase.Cell; // import the class the method depends on

/**
 * Remove a cell from the memstore. Only cells that have the same key and the
 * same memstoreTS are removed. It is ok to not update timeRangeTracker
 * in this call. It is possible that we could optimize this method by using
 * tailMap/iterator, but since this method is called rarely (only for
 * error recovery), we can leave that optimization for the future.
 * @param cell the cell to roll back
 */
@Override
public void rollback(Cell cell) {
  // If the key is in the snapshot, delete it. We should not update
  // this.size, because that tracks the size of only the memstore and
  // not the snapshot. The flush of this snapshot to disk has not
  // yet started because Store.flush() waits for all rwcc transactions to
  // commit before starting the flush to disk.
  Cell found = this.snapshot.get(cell);
  if (found != null && found.getSequenceId() == cell.getSequenceId()) {
    this.snapshot.remove(cell);
    long sz = heapSizeChange(cell, true);
    this.snapshotSize -= sz;
  }
  // If the key is in the memstore, delete it. Update this.size.
  found = this.cellSet.get(cell);
  if (found != null && found.getSequenceId() == cell.getSequenceId()) {
    removeFromCellSet(cell);
    long s = heapSizeChange(cell, true);
    this.size.addAndGet(-s);
  }
}
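The guard found.getSequenceId() == cell.getSequenceId() is what makes the rollback safe: two cells with identical row/family/qualifier/timestamp are still distinct edits if their memstore sequence ids differ. Here is a minimal standalone sketch of that situation, with made-up row and value literals (not from the source):

import java.io.IOException;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class RollbackGuardSketch {
  public static void main(String[] args) throws IOException {
    // The edit we want to roll back.
    KeyValue original = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 100L, Bytes.toBytes("v1"));
    CellUtil.setSequenceId(original, 7L);
    // A newer edit with the exact same key coordinates.
    KeyValue reapplied = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 100L, Bytes.toBytes("v2"));
    CellUtil.setSequenceId(reapplied, 8L);
    // Same key, different memstoreTS: rollback(original) must leave
    // 'reapplied' in place, which the sequence id comparison guarantees.
    System.out.println(original.getSequenceId() == reapplied.getSequenceId()); // false
  }
}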
Example 2: getNext
import org.apache.hadoop.hbase.Cell; // import the class the method depends on

/**
 * The lock on 'this' must be held by the caller.
 * @param it the iterator to advance
 * @return the next Cell visible at this scanner's read point, or null
 */
private Cell getNext(Iterator<Cell> it) {
  Cell startCell = theNext;
  Cell v = null;
  try {
    while (it.hasNext()) {
      v = it.next();
      if (v.getSequenceId() <= this.readPoint) {
        return v;
      }
      if (stopSkippingCellsIfNextRow && startCell != null
          && comparator.compareRows(v, startCell) > 0) {
        return null;
      }
    }
    return null;
  } finally {
    if (v != null) {
      // in all cases, remember the last Cell iterated to
      if (it == snapshotIt) {
        snapshotItRow = v;
      } else {
        cellSetItRow = v;
      }
    }
  }
}
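The test v.getSequenceId() <= this.readPoint is the core MVCC visibility rule: a scanner sees a cell only if the cell's write had committed at or before the scanner's read point. Isolated as a tiny helper (a sketch for illustration, not part of the class):

import org.apache.hadoop.hbase.Cell;

final class MvccVisibility {
  // The visibility predicate getNext applies while skipping cells.
  static boolean isVisible(Cell cell, long readPoint) {
    // Writes with a sequence id above the read point are still in flight
    // from this scanner's point of view and must be skipped.
    return cell.getSequenceId() <= readPoint;
  }
}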
Example 3: getKeyValue
import org.apache.hadoop.hbase.Cell; // import the class the method depends on

/**
 * Currently this must do a deep copy of the cell's contents into new arrays.
 */
@Override
public Cell getKeyValue() {
  Cell cell = ptSearcher.current();
  if (cell == null) {
    return null;
  }
  return new ClonedPrefixTreeCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
      cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), cell.getTagsArray(),
      cell.getTagsOffset(), cell.getTagsLength(), cell.getTimestamp(), cell.getTypeByte(),
      cell.getSequenceId());
}
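Note that the clone carries not just the key and value but also the timestamp, type byte, and sequence id, so MVCC checks keep working on the copy. A hedged illustration of what "the same edit" means once the sequence id travels with the cell (the helper name is made up; CellUtil.equals compares the key fields only):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

final class CellIdentity {
  // Two cells describe the same edit only if the key coordinates AND the
  // memstore sequence id match; CellUtil.equals alone ignores the latter.
  static boolean sameEdit(Cell a, Cell b) {
    return CellUtil.equals(a, b) && a.getSequenceId() == b.getSequenceId();
  }
}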
Example 4: afterEncodingKeyValue
import org.apache.hadoop.hbase.Cell; // import the class the method depends on

/**
 * @param cell the cell that was just encoded
 * @param out the stream to write the trailing tag and MVCC metadata to
 * @param encodingCtx the encoding context holding HFile and tag-compression state
 * @return the unencoded size added
 * @throws IOException if writing to the stream fails
 */
protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out,
    HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  int size = 0;
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    ByteBufferUtils.putCompressedInt(out, tagsLength);
    // There are some tags to be written
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = encodingCtx.getTagCompressionContext();
      // When tag compression is enabled, tagCompressionContext will be non-null.
      // Write the tags using Dictionary compression in that case.
      if (tagCompressionContext != null) {
        tagCompressionContext
            .compressTags(out, cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
      } else {
        out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
      }
    }
    size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    // Copy the memstore timestamp (the cell's sequence id) to the output stream.
    long memstoreTS = cell.getSequenceId();
    WritableUtils.writeVLong(out, memstoreTS);
    // TODO: use a writeVLong variant that returns the number of bytes written,
    // so the value does not have to be parsed twice.
    size += WritableUtils.getVIntSize(memstoreTS);
  }
  return size;
}
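On the read path, whatever was appended here has to be consumed in the same order. A sketch of the matching decode step (a hypothetical helper, not the codec's actual method; it assumes the cell's concrete type can accept a sequence id):

import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.io.WritableUtils;

final class MvccDecodeSketch {
  // Reads back the vlong written by afterEncodingKeyValue and attaches it
  // to a cell that has already been decoded from the block.
  static void restoreMvcc(DataInputStream in, Cell decodedCell) throws IOException {
    long memstoreTS = WritableUtils.readVLong(in);
    // CellUtil.setSequenceId throws if the implementation cannot carry one.
    CellUtil.setSequenceId(decodedCell, memstoreTS);
  }
}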
Example 5: upsert
import org.apache.hadoop.hbase.Cell; // import the class the method depends on

/**
 * Inserts the specified KeyValue into MemStore and deletes any existing
 * versions of the same row/family/qualifier as the specified KeyValue.
 * <p>
 * First, the specified KeyValue is inserted into the Memstore.
 * <p>
 * If there are any existing KeyValues in this MemStore with the same row,
 * family, and qualifier, they are removed.
 * <p>
 * Callers must hold the read lock.
 *
 * @param cell the cell to insert
 * @param readpoint readpoint below which duplicate versions can be safely removed
 * @return change in size of MemStore
 */
private long upsert(Cell cell, long readpoint) {
  // Add the Cell to the MemStore
  // Use the internalAdd method here since we (a) already have a lock
  // and (b) cannot safely use the MSLAB here without potentially
  // hitting OOME - see TestMemStore.testUpsertMSLAB for a
  // test that triggers the pathological case if we don't avoid MSLAB
  // here.
  long addedSize = internalAdd(cell);
  // Get the Cells for the row/family/qualifier regardless of timestamp.
  // For this case we want to clean up any other puts
  Cell firstCell = KeyValueUtil.createFirstOnRow(
      cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
  SortedSet<Cell> ss = cellSet.tailSet(firstCell);
  Iterator<Cell> it = ss.iterator();
  // number of versions visible to the oldest scanner
  int versionsVisible = 0;
  while (it.hasNext()) {
    Cell cur = it.next();
    if (cell == cur) {
      // ignore the one just put in
      continue;
    }
    // check that this is the row and column we are interested in, otherwise bail
    if (CellUtil.matchingRow(cell, cur) && CellUtil.matchingQualifier(cell, cur)) {
      // only remove Puts that concurrent scanners cannot possibly see
      if (cur.getTypeByte() == KeyValue.Type.Put.getCode() &&
          cur.getSequenceId() <= readpoint) {
        if (versionsVisible >= 1) {
          // if we get here we have seen at least one version visible to the oldest scanner,
          // which means we can prove that no scanner will see this version;
          // subtract the heap size of the removed cell from the running total.
          long delta = heapSizeChange(cur, true);
          addedSize -= delta;
          this.size.addAndGet(-delta);
          it.remove();
          setOldestEditTimeToNow();
        } else {
          versionsVisible++;
        }
      }
    } else {
      // past the row or column, done
      break;
    }
  }
  return addedSize;
}
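The deletion condition is easy to misread because it spans three nested checks. Pulled out as a standalone predicate (a sketch for illustration; the real method keeps the versionsVisible counter inline):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;

final class UpsertGuardSketch {
  // A duplicate version may be removed only if it is (a) a Put, (b) already
  // visible to the oldest scanner (sequenceId <= readpoint), and (c) shadowed
  // by at least one other visible version, so no scanner can observe the removal.
  static boolean canRemove(Cell cur, long readpoint, int versionsVisible) {
    return cur.getTypeByte() == KeyValue.Type.Put.getCode()
        && cur.getSequenceId() <= readpoint
        && versionsVisible >= 1;
  }
}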