This article collects typical usage examples of the Java method org.apache.hadoop.hbase.Cell.getTypeByte. If you are wondering what Cell.getTypeByte does, how to use it, or want concrete examples, the curated method samples below may help. You can also explore further usage of the enclosing class, org.apache.hadoop.hbase.Cell.
The following presents 10 code examples of Cell.getTypeByte, ordered by popularity.
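Before the examples, a minimal sketch of what the method returns may help: getTypeByte exposes the raw KeyValue type byte of a cell, which can be mapped back to a KeyValue.Type constant. This sketch is not one of the curated examples; the helper names are illustrative only.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;

// Illustrative helpers (hypothetical names): interpret the raw type byte of a cell.
static boolean isPut(Cell cell) {
  return cell.getTypeByte() == KeyValue.Type.Put.getCode();
}

static KeyValue.Type typeOf(Cell cell) {
  return KeyValue.Type.codeToType(cell.getTypeByte());
}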
Example 1: SerializableCell
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/**
 * Copy data from a {@code Cell} instance.
 *
 * @param cell the cell whose row, family, qualifier, value, timestamp and type byte are copied
 */
public SerializableCell( Cell cell ) {
  rowKey = CellUtil.cloneRow(cell);
  family = CellUtil.cloneFamily(cell);
  qualifier = CellUtil.cloneQualifier(cell);
  value = CellUtil.cloneValue(cell);
  timestamp = cell.getTimestamp();
  type = cell.getTypeByte();
}
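The copied fields are sufficient to rebuild a cell later. A hypothetical round-trip might look like the following; it assumes the fields above are accessible, which the example itself does not show.

// Hypothetical round-trip: rebuild a KeyValue from the copied fields.
SerializableCell sc = new SerializableCell(cell);
KeyValue rebuilt = new KeyValue(sc.rowKey, sc.family, sc.qualifier,
    sc.timestamp, KeyValue.Type.codeToType(sc.type), sc.value);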
Example 2: trackTimestamps
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/**
 * Record the earliest Put timestamp. If the timeRangeTracker is not set,
 * update the TimeRangeTracker to include the timestamp of this key.
 *
 * @param cell the cell whose timestamp is tracked
 */
public void trackTimestamps(final Cell cell) {
  if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) {
    earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp());
  }
  if (!isTimeRangeTrackerSet) {
    timeRangeTracker.includeTimestamp(cell);
  }
}
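A sketch of how such a tracker is typically driven: every cell headed for a store file is passed in, but only Put cells (detected via getTypeByte) can lower earliestPutTs. The iterable name is hypothetical, and the sketch assumes earliestPutTs starts at HConstants.LATEST_TIMESTAMP.

// Sketch, assuming the field starts as: earliestPutTs = HConstants.LATEST_TIMESTAMP;
for (Cell c : cellsBeingFlushed) { // hypothetical iterable of cells being written
  trackTimestamps(c);
}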
Example 3: add
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/**
 * Add the specified KeyValue to the list of deletes to check against for
 * this row operation.
 * <p>
 * This is called when a Delete is encountered.
 * @param cell the delete cell
 */
@Override
public void add(Cell cell) {
  long timestamp = cell.getTimestamp();
  int qualifierOffset = cell.getQualifierOffset();
  int qualifierLength = cell.getQualifierLength();
  byte type = cell.getTypeByte();
  if (!hasFamilyStamp || timestamp > familyStamp) {
    if (type == KeyValue.Type.DeleteFamily.getCode()) {
      hasFamilyStamp = true;
      familyStamp = timestamp;
      return;
    } else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
      familyVersionStamps.add(timestamp);
      return;
    }
    if (deleteBuffer != null && type < deleteType) {
      // same column, so ignore less specific delete
      if (Bytes.equals(deleteBuffer, deleteOffset, deleteLength,
          cell.getQualifierArray(), qualifierOffset, qualifierLength)) {
        return;
      }
    }
    // new column, or more general delete type
    deleteBuffer = cell.getQualifierArray();
    deleteOffset = qualifierOffset;
    deleteLength = qualifierLength;
    deleteType = type;
    deleteTimestamp = timestamp;
  }
  // missing else is never called.
}
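The type < deleteType comparison relies on the ordering of the KeyValue type codes, which run from the most specific delete to the most general. The values below are the codes defined by the KeyValue.Type enum:

// KeyValue.Type delete codes, most specific first:
//   Delete              = 8   (one version of one column)
//   DeleteFamilyVersion = 10  (one version of all columns in a family)
//   DeleteColumn        = 12  (all versions of one column)
//   DeleteFamily        = 14  (everything in the family)
// So "type < deleteType" means the incoming delete is more specific than the
// buffered one and, for the same qualifier, can be ignored.
assert KeyValue.Type.Delete.getCode() < KeyValue.Type.DeleteColumn.getCode();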
Example 4: addAfterRowFamilyQualifier
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/***************** internal add methods ************************/
private void addAfterRowFamilyQualifier(Cell cell) {
  // timestamps
  timestamps[totalCells] = cell.getTimestamp();
  timestampEncoder.add(cell.getTimestamp());
  // memstore timestamps
  if (includeMvccVersion) {
    mvccVersions[totalCells] = cell.getMvccVersion();
    mvccVersionEncoder.add(cell.getMvccVersion());
    totalUnencodedBytes += WritableUtils.getVIntSize(cell.getMvccVersion());
  } else {
    // must overwrite in case there was a previous version in this array slot
    mvccVersions[totalCells] = 0L;
    if (totalCells == 0) { // only need to do this for the first cell added
      mvccVersionEncoder.add(0L);
    }
    // totalUncompressedBytes += 0; // mvccVersion takes zero bytes when disabled
  }
  // types
  typeBytes[totalCells] = cell.getTypeByte();
  cellTypeEncoder.add(cell.getTypeByte());
  // values
  totalValueBytes += cell.getValueLength();
  // double the array each time we run out of space
  values = ArrayUtils.growIfNecessary(values, totalValueBytes, 2 * totalValueBytes);
  CellUtil.copyValueTo(cell, values, valueOffsets[totalCells]);
  if (cell.getValueLength() > maxValueLength) {
    maxValueLength = cell.getValueLength();
  }
  valueOffsets[totalCells + 1] = totalValueBytes;
  // general
  totalUnencodedBytes += KeyValueUtil.length(cell);
  ++totalCells;
}
Example 5: getKeyValue
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/**
 * Currently must do a deep copy into a new array.
 */
@Override
public Cell getKeyValue() {
  Cell cell = ptSearcher.current();
  if (cell == null) {
    return null;
  }
  return new ClonedPrefixTreeCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
      cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), cell.getTagsArray(),
      cell.getTagsOffset(), cell.getTagsLength(), cell.getTimestamp(), cell.getTypeByte(),
      cell.getSequenceId());
}
Example 6: compareTypeBytes
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
private int compareTypeBytes(Cell key, Cell right) {
  if (key.getFamilyLength() + key.getQualifierLength() == 0
      && key.getTypeByte() == Type.Minimum.getCode()) {
    // left is "bigger", i.e. it appears later in the sorted order
    return 1;
  }
  if (right.getFamilyLength() + right.getQualifierLength() == 0
      && right.getTypeByte() == Type.Minimum.getCode()) {
    return -1;
  }
  return 0;
}
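Type.Minimum never appears in a stored cell; it marks fabricated boundary keys, and the special case above makes such a key with an empty family and qualifier sort after every real cell on its row. A sketch of building one (constructor usage only; the row variable is assumed):

// Sketch: a fabricated "last possible key on this row". compareTypeBytes
// special-cases the empty-column + Minimum-type combination so it sorts
// after all real cells of the row.
KeyValue lastOnRow = new KeyValue(row, null, null,
    HConstants.LATEST_TIMESTAMP, KeyValue.Type.Minimum);
assert lastOnRow.getTypeByte() == KeyValue.Type.Minimum.getCode();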
Example 7: add
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
@Override
public void add(Cell delCell) {
  // Cannot call super.add because we need to determine whether this delete should be considered
  long timestamp = delCell.getTimestamp();
  int qualifierOffset = delCell.getQualifierOffset();
  int qualifierLength = delCell.getQualifierLength();
  byte type = delCell.getTypeByte();
  if (type == KeyValue.Type.DeleteFamily.getCode()) {
    hasFamilyStamp = true;
    boolean hasVisTag = extractDeleteCellVisTags(delCell, KeyValue.Type.DeleteFamily);
    if (!hasVisTag && timestamp > familyStamp) {
      familyStamp = timestamp;
    }
    return;
  } else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
    familyVersionStamps.add(timestamp);
    extractDeleteCellVisTags(delCell, KeyValue.Type.DeleteFamilyVersion);
    return;
  }
  // new column, or more general delete type
  if (deleteBuffer != null) {
    if (Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength, delCell.getQualifierArray(),
        qualifierOffset, qualifierLength) != 0) {
      // A rare case where there are deletes for a column qualifier but
      // no corresponding puts for them.
      visibilityTagsDeleteColumns = null;
      visiblityTagsDeleteColumnVersion = null;
    } else if (type == KeyValue.Type.Delete.getCode() && (deleteTimestamp != timestamp)) {
      // The timestamp changed, which means we can clear the list. When the ts
      // is the same but the vis tags differ, we need to collect them all: in
      // the normal Put case, if there are two cells with the same ts and
      // different vis tags, only one of them is returned, and handling this
      // with a single List<Tag> would mean only one of the cells is
      // considered. Doing this as a precaution for rare cases.
      visiblityTagsDeleteColumnVersion = null;
    }
  }
  deleteBuffer = delCell.getQualifierArray();
  deleteOffset = qualifierOffset;
  deleteLength = qualifierLength;
  deleteType = type;
  deleteTimestamp = timestamp;
  extractDeleteCellVisTags(delCell, KeyValue.Type.codeToType(type));
}
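This variant tracks visibility labels carried on delete markers. For context, a delete with a visibility expression is produced on the client side roughly like this (a sketch; it assumes the visibility labels feature is enabled and uses the standard Delete and CellVisibility client classes; row, family, qualifier and table are assumptions):

// Sketch: a Delete carrying a visibility expression, which ends up as tags
// on the delete marker that extractDeleteCellVisTags later reads.
Delete d = new Delete(row);
d.setCellVisibility(new CellVisibility("secret"));
d.addColumns(family, qualifier);
table.delete(d);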
Example 8: updateColumnValue
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/**
 * Only used by tests. TODO: Remove.
 *
 * Given the specs of a column, update it, first by inserting a new record,
 * then removing the old one. Since there is only one KeyValue involved, the memstoreTS
 * will be set to 0, ensuring it is immediately visible to all readers. The underlying
 * store will ensure that the insert/delete are each atomic: a scanner/reader will either
 * get the new value or the old one, and all readers will eventually see only the new
 * value once the old one has been removed.
 *
 * @param row row key
 * @param family column family
 * @param qualifier column qualifier
 * @param newValue new value to write
 * @param now current timestamp
 * @return the timestamp used for the new cell
 */
@Override
public long updateColumnValue(byte[] row,
    byte[] family,
    byte[] qualifier,
    long newValue,
    long now) {
  Cell firstCell = KeyValueUtil.createFirstOnRow(row, family, qualifier);
  // Is there a Cell in 'snapshot' with the same TS? If so, bump the timestamp a bit.
  SortedSet<Cell> snSs = snapshot.tailSet(firstCell);
  if (!snSs.isEmpty()) {
    Cell snc = snSs.first();
    // is there a matching Cell in the snapshot?
    if (CellUtil.matchingRow(snc, firstCell) && CellUtil.matchingQualifier(snc, firstCell)) {
      if (snc.getTimestamp() == now) {
        // timestamp collision with the snapshot entry, so advance 'now'
        now += 1;
      }
    }
  }
  // Logic: the new ts MUST be at least 'now', but it could be larger if necessary;
  // it should also be max(now, mostRecentTsInMemstore). So we can't add the new
  // Cell without knowing what's already there, and we also want to take this
  // chance to delete some cells. So two loops (sad).
  SortedSet<Cell> ss = cellSet.tailSet(firstCell);
  for (Cell cell : ss) {
    // if this isn't the row we are interested in, then bail
    if (!CellUtil.matchingColumn(cell, family, qualifier)
        || !CellUtil.matchingRow(cell, firstCell)) {
      break; // rows don't match, bail
    }
    // if a newer Put already exists for this column, advance 'now' past its timestamp
    if (cell.getTypeByte() == KeyValue.Type.Put.getCode() &&
        cell.getTimestamp() > now && CellUtil.matchingQualifier(firstCell, cell)) {
      now = cell.getTimestamp();
    }
  }
  // create or update (upsert) a new Cell with
  // 'now' and a 0 memstoreTS == immediately visible
  List<Cell> cells = new ArrayList<Cell>(1);
  cells.add(new KeyValue(row, family, qualifier, now, Bytes.toBytes(newValue)));
  return upsert(cells, 1L);
}
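A hypothetical test call could look like the following; the memstore variable is an assumption, and the method picks a timestamp of at least the 'now' passed in:

// Hypothetical usage in a test: replace the long value of one column.
long usedTs = memstore.updateColumnValue(Bytes.toBytes("row1"),
    Bytes.toBytes("cf"), Bytes.toBytes("q"), 42L, System.currentTimeMillis());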
Example 9: upsert
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/**
 * Inserts the specified KeyValue into the MemStore and deletes any existing
 * versions of the same row/family/qualifier as the specified KeyValue.
 * <p>
 * First, the specified KeyValue is inserted into the Memstore.
 * <p>
 * If there are any existing KeyValues in this MemStore with the same row,
 * family, and qualifier, they are removed.
 * <p>
 * Callers must hold the read lock.
 *
 * @param cell the cell to upsert
 * @param readpoint the read point below which no scanner can still observe a version
 * @return change in size of the MemStore
 */
private long upsert(Cell cell, long readpoint) {
  // Add the Cell to the MemStore.
  // Use the internalAdd method here since we (a) already have a lock
  // and (b) cannot safely use the MSLAB here without potentially
  // hitting OOME - see TestMemStore.testUpsertMSLAB for a
  // test that triggers the pathological case if we don't avoid MSLAB here.
  long addedSize = internalAdd(cell);
  // Get the Cells for the row/family/qualifier regardless of timestamp.
  // For this case we want to clean up any other puts.
  Cell firstCell = KeyValueUtil.createFirstOnRow(
      cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
  SortedSet<Cell> ss = cellSet.tailSet(firstCell);
  Iterator<Cell> it = ss.iterator();
  // versions visible to the oldest scanner
  int versionsVisible = 0;
  while (it.hasNext()) {
    Cell cur = it.next();
    if (cell == cur) {
      // ignore the one just put in
      continue;
    }
    // check that this is the row and column we are interested in, otherwise bail
    if (CellUtil.matchingRow(cell, cur) && CellUtil.matchingQualifier(cell, cur)) {
      // only remove Puts that concurrent scanners cannot possibly see
      if (cur.getTypeByte() == KeyValue.Type.Put.getCode() &&
          cur.getSequenceId() <= readpoint) {
        if (versionsVisible >= 1) {
          // if we get here we have seen at least one version visible to the oldest scanner,
          // which means we can prove that no scanner will see this version
          long delta = heapSizeChange(cur, true);
          addedSize -= delta;
          this.size.addAndGet(-delta);
          it.remove();
          setOldestEditTimeToNow();
        } else {
          versionsVisible++;
        }
      }
    } else {
      // past the row or column, done
      break;
    }
  }
  return addedSize;
}
Example 10: isNewRowOrType
import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/**
 * @param previousCell the previously processed cell, or null if none
 * @param cell the current cell
 * @return true if we have crossed over onto a new row or type
 */
private boolean isNewRowOrType(final Cell previousCell, final Cell cell) {
  return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte()
      || !CellUtil.matchingRow(previousCell, cell);
}