This article collects typical usage examples of the Java method org.apache.hadoop.hbase.Cell.getTagsLength. If you are wondering what Cell.getTagsLength does, how to use it, or where to find concrete examples of it, the curated code samples below should help. You can also read more about the enclosing class, org.apache.hadoop.hbase.Cell.
The following shows 15 code examples of Cell.getTagsLength, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
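Before the individual examples, here is a minimal sketch of the typical getTagsLength pattern, assuming the HBase 1.x-era Cell/Tag API used throughout the examples below (CellUtil.tagsIterator over the tags byte[], and a concrete Tag class). The class and method names in the sketch are made up for illustration and are not taken from any of the examples:
import java.util.Iterator;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Tag;
public class GetTagsLengthSketch {
  /** Prints every tag of the cell; returns true if the cell carried any tags at all. */
  static boolean dumpTags(Cell cell) {
    // getTagsLength() is 0 when the cell carries no tags, so check it before iterating.
    if (cell.getTagsLength() == 0) {
      return false;
    }
    Iterator<Tag> it = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
        cell.getTagsLength());
    while (it.hasNext()) {
      Tag tag = it.next();
      System.out.println("tag type=" + tag.getType() + ", length=" + tag.getTagLength());
    }
    return true;
  }
}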
Example 1: checkForReservedVisibilityTagPresence
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
/**
* Checks whether the cell contains any tag with type VISIBILITY_TAG_TYPE. This
* tag type is reserved and should not be set explicitly by the user. There are
* two versions of this method: one that accepts a Pair and one that does not.
* For preAppend and preIncrement, the additional operations such as checking
* for STRING_VIS_TAG_TYPE are not needed, so the API without the Pair can be
* used.
*
* @param cell the cell to inspect
* @return false if the cell carries a reserved visibility tag, true otherwise
* @throws IOException
*/
private boolean checkForReservedVisibilityTagPresence(Cell cell) throws IOException {
// Bypass this check when the operation is done by a system/super user.
// This is done because, during replication, the cells coming to the peer
// cluster may carry reserved-typed tags; that is fine and they should be
// added to the peer cluster table.
if (isSystemOrSuperUser()) {
return true;
}
if (cell.getTagsLength() > 0) {
Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
cell.getTagsLength());
while (tagsItr.hasNext()) {
if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) {
return false;
}
}
}
return true;
}
Example 2: internalEncode
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
DataOutputStream out) throws IOException {
int klength = KeyValueUtil.keyLength(cell);
int vlength = cell.getValueLength();
out.writeInt(klength);
out.writeInt(vlength);
CellUtil.writeFlatKey(cell, out);
out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
// Write the additional tag into the stream
if (encodingContext.getHFileContext().isIncludesTags()) {
int tagsLength = cell.getTagsLength();
out.writeShort(tagsLength);
if (tagsLength > 0) {
out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
}
size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
}
if (encodingContext.getHFileContext().isIncludesMvcc()) {
WritableUtils.writeVLong(out, cell.getSequenceId());
size += WritableUtils.getVIntSize(cell.getSequenceId());
}
return size;
}
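For reference, here is a minimal sketch (not part of the original example; the class and method names are made up for illustration) of reading back the record layout that internalEncode writes above: key length and value length as ints, the flat key and value bytes, an optional short-prefixed tags block, and an optional VLong sequence id.
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;
public class NoneEncodingReadBackSketch {
  /** Reads one record in the layout produced by internalEncode above. */
  static void readOne(DataInputStream in, boolean includesTags, boolean includesMvcc)
      throws IOException {
    int keyLength = in.readInt();        // matches out.writeInt(klength)
    int valueLength = in.readInt();      // matches out.writeInt(vlength)
    byte[] key = new byte[keyLength];
    in.readFully(key);                   // flat key written by CellUtil.writeFlatKey
    byte[] value = new byte[valueLength];
    in.readFully(value);
    if (includesTags) {
      int tagsLength = in.readUnsignedShort();  // matches out.writeShort(tagsLength)
      byte[] tags = new byte[tagsLength];
      in.readFully(tags);
    }
    if (includesMvcc) {
      long sequenceId = WritableUtils.readVLong(in);  // matches WritableUtils.writeVLong
    }
  }
}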
Example 3: extractVisibilityTags
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
/**
* Extract the visibility tags of the given Cell into the given List
* @param cell - the cell
* @param tags - the list that will be populated if visibility tags are present
* @return The visibility tags serialization format
*/
public static Byte extractVisibilityTags(Cell cell, List<Tag> tags) {
Byte serializationFormat = null;
if (cell.getTagsLength() > 0) {
Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
cell.getTagsLength());
while (tagsIterator.hasNext()) {
Tag tag = tagsIterator.next();
if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
serializationFormat = tag.getBuffer()[tag.getTagOffset()];
} else if (tag.getType() == VISIBILITY_TAG_TYPE) {
tags.add(tag);
}
}
}
return serializationFormat;
}
Example 4: extractAndPartitionTags
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
/**
* Extracts and partitions the visibility tags and non-visibility tags
*
* @param cell - the cell whose visibility and non-visibility tags will be
* extracted and partitioned
* @param visTags
* - all the visibility tags of type TagType.VISIBILITY_TAG_TYPE will
* be added to this list
* @param nonVisTags - all the non-visibility tags will be added to this list
* @return - the serialization format of the tag. Can be null if no tags are found or
* if there is no visibility tag found
*/
public static Byte extractAndPartitionTags(Cell cell, List<Tag> visTags,
List<Tag> nonVisTags) {
Byte serializationFormat = null;
if (cell.getTagsLength() > 0) {
Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
cell.getTagsLength());
while (tagsIterator.hasNext()) {
Tag tag = tagsIterator.next();
if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
serializationFormat = tag.getBuffer()[tag.getTagOffset()];
} else if (tag.getType() == VISIBILITY_TAG_TYPE) {
visTags.add(tag);
} else {
// ignore string encoded visibility expressions, will be added in replication handling
nonVisTags.add(tag);
}
}
}
return serializationFormat;
}
Example 5: addCellPermissions
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
private static void addCellPermissions(final byte[] perms, Map<byte[], List<Cell>> familyMap) {
// Iterate over the entries in the familyMap, replacing the cells therein
// with new cells including the ACL data
for (Map.Entry<byte[], List<Cell>> e: familyMap.entrySet()) {
List<Cell> newCells = Lists.newArrayList();
for (Cell cell: e.getValue()) {
// Prepend the supplied perms in a new ACL tag to an update list of tags for the cell
List<Tag> tags = Lists.newArrayList(new Tag(AccessControlLists.ACL_TAG_TYPE, perms));
if (cell.getTagsLength() > 0) {
Iterator<Tag> tagIterator = CellUtil.tagsIterator(cell.getTagsArray(),
cell.getTagsOffset(), cell.getTagsLength());
while (tagIterator.hasNext()) {
tags.add(tagIterator.next());
}
}
newCells.add(new TagRewriteCell(cell, Tag.fromList(tags)));
}
// This is supposed to be safe, won't CME
e.setValue(newCells);
}
}
Example 6: toStringMap
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
private static Map<String, Object> toStringMap(Cell cell) {
Map<String, Object> stringMap = new HashMap<String, Object>();
stringMap.put("row",
Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
stringMap.put("family", Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(),
cell.getFamilyLength()));
stringMap.put("qualifier",
Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength()));
stringMap.put("timestamp", cell.getTimestamp());
stringMap.put("vlen", cell.getValueLength());
if (cell.getTagsLength() > 0) {
List<String> tagsString = new ArrayList<String>();
Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
cell.getTagsLength());
while (tagsIterator.hasNext()) {
Tag tag = tagsIterator.next();
tagsString.add((tag.getType()) + ":"
+ Bytes.toStringBinary(tag.getBuffer(), tag.getTagOffset(), tag.getTagLength()));
}
stringMap.put("tag", tagsString);
}
return stringMap;
}
Example 7: verifyTags
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
/**
* This verifies that each cell has a tag whose value equals its row key. For this to work,
* the HBase instance must have HConstants.RPC_CODEC_CONF_KEY set to
* KeyValueCodecWithTags.class.getCanonicalName().
* @param table table containing tagged cells
* @throws IOException if problems reading table
*/
public static void verifyTags(Table table) throws IOException {
ResultScanner s = table.getScanner(new Scan());
for (Result r : s) {
for (Cell c : r.listCells()) {
byte[] ta = c.getTagsArray();
int toff = c.getTagsOffset();
int tlen = c.getTagsLength();
Tag t = Tag.getTag(ta, toff, tlen, TagType.ACL_TAG_TYPE);
if (t == null) {
fail(c.toString() + " has null tag");
continue;
}
byte[] tval = t.getValue();
assertArrayEquals(c.toString() + " has tag" + Bytes.toString(tval),
r.getRow(), tval);
}
}
}
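As the Javadoc above notes, verifyTags can only see tags if the client RPC codec ships them. Below is a small sketch of that configuration; the constant and codec class names are as in HBase 1.x (verify against your version), and the wrapper class name is made up for illustration:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
public class TagAwareRpcCodecConfig {
  static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Without this codec the RPC layer strips tags, and verifyTags(...) above
    // would see getTagsLength() == 0 for every cell it scans.
    conf.set(HConstants.RPC_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getCanonicalName());
    return conf;
  }
}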
Example 8: afterEncodingKeyValue
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
/**
* @param cell the cell whose tags and mvcc (if enabled) are appended after the key/value
* @param out the output stream being written to
* @param encodingCtx the encoding context carrying the HFile context and tag compression state
* @return unencoded size added
* @throws IOException
*/
protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out,
HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
int size = 0;
if (encodingCtx.getHFileContext().isIncludesTags()) {
int tagsLength = cell.getTagsLength();
ByteBufferUtils.putCompressedInt(out, tagsLength);
// There are some tags to be written
if (tagsLength > 0) {
TagCompressionContext tagCompressionContext = encodingCtx.getTagCompressionContext();
// When tag compression is enabled, tagCompressionContext will have a not null value. Write
// the tags using Dictionary compression in such a case
if (tagCompressionContext != null) {
tagCompressionContext
.compressTags(out, cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
} else {
out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
}
}
size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
}
if (encodingCtx.getHFileContext().isIncludesMvcc()) {
// Copy memstore timestamp from the byte buffer to the output stream.
long memstoreTS = cell.getSequenceId();
WritableUtils.writeVLong(out, memstoreTS);
// TODO use a writeVLong which returns the #bytes written so that 2 time parsing can be
// avoided.
size += WritableUtils.getVIntSize(memstoreTS);
}
return size;
}
Example 9: postMutationBeforeWAL
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
@Override
public Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException {
List<Tag> tags = Lists.newArrayList();
CellVisibility cellVisibility = null;
try {
cellVisibility = mutation.getCellVisibility();
} catch (DeserializationException e) {
throw new IOException(e);
}
if (cellVisibility == null) {
return newCell;
}
// Prepend new visibility tags to a new list of tags for the cell
// Don't check user auths for labels with Mutations when the user is super user
boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser());
tags.addAll(this.visibilityLabelService.createVisibilityExpTags(cellVisibility.getExpression(),
true, authCheck));
// Save an object allocation where we can
if (newCell.getTagsLength() > 0) {
// Carry forward all other tags
Iterator<Tag> tagsItr = CellUtil.tagsIterator(newCell.getTagsArray(),
newCell.getTagsOffset(), newCell.getTagsLength());
while (tagsItr.hasNext()) {
Tag tag = tagsItr.next();
if (tag.getType() != TagType.VISIBILITY_TAG_TYPE
&& tag.getType() != TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
tags.add(tag);
}
}
}
Cell rewriteCell = new TagRewriteCell(newCell, Tag.fromList(tags));
return rewriteCell;
}
Example 10: isVisibilityTagsPresent
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
public static boolean isVisibilityTagsPresent(Cell cell) {
if (cell.getTagsLength() == 0) {
return false;
}
Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
cell.getTagsLength());
while (tagsIterator.hasNext()) {
Tag tag = tagsIterator.next();
if (tag.getType() == VISIBILITY_TAG_TYPE) {
return true;
}
}
return false;
}
Example 11: checkForReservedTagPresence
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
private void checkForReservedTagPresence(User user, Mutation m) throws IOException {
// No need to check if we're not going to throw
if (!authorizationEnabled) {
m.setAttribute(TAG_CHECK_PASSED, TRUE);
return;
}
// Superusers are allowed to store cells unconditionally.
if (Superusers.isSuperUser(user)) {
m.setAttribute(TAG_CHECK_PASSED, TRUE);
return;
}
// We already checked (prePut vs preBatchMutation)
if (m.getAttribute(TAG_CHECK_PASSED) != null) {
return;
}
for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) {
Cell cell = cellScanner.current();
if (cell.getTagsLength() > 0) {
Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
cell.getTagsLength());
while (tagsItr.hasNext()) {
if (tagsItr.next().getType() == AccessControlLists.ACL_TAG_TYPE) {
throw new AccessDeniedException("Mutation contains cell with reserved type tag");
}
}
}
}
m.setAttribute(TAG_CHECK_PASSED, TRUE);
}
Example 12: write
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
@Override
public void write(Cell cell) throws IOException {
// We first write the KeyValue infrastructure as VInts.
StreamUtils.writeRawVInt32(out, KeyValueUtil.keyLength(cell));
StreamUtils.writeRawVInt32(out, cell.getValueLength());
// To support tags
int tagsLength = cell.getTagsLength();
StreamUtils.writeRawVInt32(out, tagsLength);
// Write row, qualifier, and family; use dictionary
// compression as they're likely to have duplicates.
write(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), compression.rowDict);
write(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
compression.familyDict);
write(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
compression.qualifierDict);
// Write timestamp, type and value as uncompressed.
StreamUtils.writeLong(out, cell.getTimestamp());
out.write(cell.getTypeByte());
out.write(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
if (tagsLength > 0) {
if (compression.tagCompressionContext != null) {
// Write tags using Dictionary compression
compression.tagCompressionContext.compressTags(out, cell.getTagsArray(),
cell.getTagsOffset(), tagsLength);
} else {
// Tag compression is disabled within the WAL compression. Just write the tag bytes
// as they are.
out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
}
}
}
Example 13: addToCellSet
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
private boolean addToCellSet(Cell e) {
boolean b = this.cellSet.add(e);
// In the no-tags case, NoTagsKeyValue.getTagsLength() is a cheap call.
// When the ACL or Visibility coprocessor adds tags during a mutation,
// TagRewriteCell.getTagsLength() is likewise cheap; we do not need to
// parse the byte[] to determine the tags length.
if (e.getTagsLength() > 0) {
tagsPresent = true;
}
setOldestEditTimeToNow();
return b;
}
Example 14: carryForwardTags
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
/**
* @param cell
* @param tags
* @return The passed-in List<Tag> but with the tags from <code>cell</code> added.
*/
private static List<Tag> carryForwardTags(final Cell cell, final List<Tag> tags) {
if (cell.getTagsLength() <= 0) return tags;
List<Tag> newTags = tags == null ? new ArrayList<Tag>() : /* Append Tags */tags;
Iterator<Tag> i =
CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
while (i.hasNext()) newTags.add(i.next());
return newTags;
}
Example 15: getKeyValue
import org.apache.hadoop.hbase.Cell; // import the dependent package/class
/**
* Currently must do a deep copy into a new array.
*/
@Override
public Cell getKeyValue() {
Cell cell = ptSearcher.current();
if (cell == null) {
return null;
}
return new ClonedPrefixTreeCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), cell.getTagsArray(),
cell.getTagsOffset(), cell.getTagsLength(), cell.getTimestamp(), cell.getTypeByte(),
cell.getSequenceId());
}