当前位置: 首页>>代码示例>>Java>>正文


Java Cell.getQualifierArray方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.Cell.getQualifierArray方法的典型用法代码示例。如果您正苦于以下问题:Java Cell.getQualifierArray方法的具体用法?Java Cell.getQualifierArray怎么用?Java Cell.getQualifierArray使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.Cell的用法示例。


在下文中一共展示了Cell.getQualifierArray方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: add

import org.apache.hadoop.hbase.Cell; //导入方法依赖的package包/类
/**
 * Add the specified KeyValue to the list of deletes to check against for
 * this row operation.
 * <p>
 * This is called when a Delete is encountered.
 * <p>
 * Tracks three kinds of delete markers: a family-wide delete (shadows all
 * older cells in the family), per-timestamp family-version deletes, and a
 * single "current" column delete kept in {@code deleteBuffer}/{@code
 * deleteOffset}/{@code deleteLength} with its type and timestamp.
 * @param cell - the delete cell
 */
@Override
public void add(Cell cell) {
  long timestamp = cell.getTimestamp();
  int qualifierOffset = cell.getQualifierOffset();
  int qualifierLength = cell.getQualifierLength();
  byte type = cell.getTypeByte();
  // Only record this delete if it is not already shadowed by a newer
  // family-wide delete stamp.
  if (!hasFamilyStamp || timestamp > familyStamp) {
    if (type == KeyValue.Type.DeleteFamily.getCode()) {
      hasFamilyStamp = true;
      familyStamp = timestamp;
      return;
    } else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
      // Family-version deletes only hide cells with exactly this timestamp,
      // so all of them are collected rather than keeping a single stamp.
      familyVersionStamps.add(timestamp);
      return;
    }

    // Delete type codes are ordered so that a smaller code is a LESS
    // specific marker; a less specific delete for the same qualifier is
    // subsumed by the one already tracked and can be dropped.
    if (deleteBuffer != null && type < deleteType) {
      // same column, so ignore less specific delete
      if (Bytes.equals(deleteBuffer, deleteOffset, deleteLength,
          cell.getQualifierArray(), qualifierOffset, qualifierLength)){
        return;
      }
    }
    // new column, or more general delete type
    deleteBuffer = cell.getQualifierArray();
    deleteOffset = qualifierOffset;
    deleteLength = qualifierLength;
    deleteType = type;
    deleteTimestamp = timestamp;
  }
  // missing else is never called.
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:40,代码来源:ScanDeleteTracker.java

示例2: filterKeyValue

import org.apache.hadoop.hbase.Cell; //导入方法依赖的package包/类
/**
 * Decides whether the cell's qualifier falls inside the configured
 * [minColumn, maxColumn] range, honoring the inclusive/exclusive flags.
 * A null minColumn means unbounded below; a null maxColumn unbounded above.
 */
@Override
public ReturnCode filterKeyValue(Cell kv) {
  // TODO have a column compare method in Cell
  byte[] qualifier = kv.getQualifierArray();
  int qOffset = kv.getQualifierOffset();
  int qLength = kv.getQualifierLength();

  if (this.minColumn != null) {
    int lowerCmp = Bytes.compareTo(qualifier, qOffset, qLength,
        this.minColumn, 0, this.minColumn.length);
    // Still below the lower bound: jump ahead to it.
    if (lowerCmp < 0) {
      return ReturnCode.SEEK_NEXT_USING_HINT;
    }
    // Exactly on an exclusive lower bound: skip this column.
    if (lowerCmp == 0 && !this.minColumnInclusive) {
      return ReturnCode.NEXT_COL;
    }
  }

  // No upper bound: everything at or past the lower bound is accepted.
  if (this.maxColumn == null) {
    return ReturnCode.INCLUDE;
  }

  int upperCmp = Bytes.compareTo(qualifier, qOffset, qLength,
      this.maxColumn, 0, this.maxColumn.length);
  boolean withinUpper =
      this.maxColumnInclusive ? upperCmp <= 0 : upperCmp < 0;
  return withinUpper ? ReturnCode.INCLUDE : ReturnCode.NEXT_ROW;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:36,代码来源:ColumnRangeFilter.java

示例3: filterKeyValue

import org.apache.hadoop.hbase.Cell; //导入方法依赖的package包/类
/**
 * Paginates columns within a row: either by a numeric (offset, limit)
 * window over the column count, or — when {@code columnOffset} is set —
 * by seeking to that qualifier and then returning up to {@code limit}
 * columns from it onward. {@code count} tracks columns emitted so far
 * for the current row.
 */
@Override
public ReturnCode filterKeyValue(Cell v)
{
  if (columnOffset == null) {
    // Purely count-based pagination window [offset, offset + limit).
    if (count >= offset + limit) {
      return ReturnCode.NEXT_ROW;
    }
    ReturnCode result = count < offset
        ? ReturnCode.NEXT_COL
        : ReturnCode.INCLUDE_AND_NEXT_COL;
    count++;
    return result;
  }

  // Qualifier-based pagination: page starts at columnOffset.
  if (count >= limit) {
    return ReturnCode.NEXT_ROW;
  }
  byte[] qualifier = v.getQualifierArray();
  if (qualifier == null) {
    return ReturnCode.SEEK_NEXT_USING_HINT;
  }
  // Only the first cell of the row needs the positional check; once one
  // column has been included, everything after it belongs to the page.
  if (count == 0
      && Bytes.compareTo(qualifier,
                         v.getQualifierOffset(),
                         v.getQualifierLength(),
                         this.columnOffset,
                         0,
                         this.columnOffset.length) < 0) {
    return ReturnCode.SEEK_NEXT_USING_HINT;
  }
  count++;
  return ReturnCode.INCLUDE_AND_NEXT_COL;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:39,代码来源:ColumnPaginationFilter.java

示例4: filterKeyValue

import org.apache.hadoop.hbase.Cell; //导入方法依赖的package包/类
/**
 * Includes the cell outright when no prefix is configured or the cell has
 * no qualifier bytes; otherwise delegates the prefix match to
 * {@code filterColumn}.
 */
@Override
public ReturnCode filterKeyValue(Cell kv) {
  byte[] qualifier = kv.getQualifierArray();
  if (this.prefix == null || qualifier == null) {
    return ReturnCode.INCLUDE;
  }
  return filterColumn(qualifier, kv.getQualifierOffset(), kv.getQualifierLength());
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:9,代码来源:ColumnPrefixFilter.java

示例5: filterKeyValue

import org.apache.hadoop.hbase.Cell; //导入方法依赖的package包/类
/**
 * Includes the cell outright when no prefixes are configured or the cell
 * has no qualifier bytes; otherwise delegates the multi-prefix match to
 * {@code filterColumn}.
 */
@Override
public ReturnCode filterKeyValue(Cell kv) {
  byte[] qualifier = kv.getQualifierArray();
  if (sortedPrefixes.size() == 0 || qualifier == null) {
    return ReturnCode.INCLUDE;
  }
  return filterColumn(qualifier, kv.getQualifierOffset(), kv.getQualifierLength());
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:9,代码来源:MultipleColumnPrefixFilter.java

示例6: getKeyValue

import org.apache.hadoop.hbase.Cell; //导入方法依赖的package包/类
/**
 * Returns the searcher's current cell as a deep copy, or {@code null}
 * when the searcher is not positioned on a cell.
 * <p>
 * currently must do deep copy into new array
 */
@Override
public Cell getKeyValue() {
  Cell current = ptSearcher.current();
  if (current == null) {
    return null;
  }
  // Copy every component (row, family, qualifier, value, tags, timestamp,
  // type, sequence id) out of the prefix-tree-backed cell so the returned
  // cell stays valid after the searcher moves on.
  return new ClonedPrefixTreeCell(
      current.getRowArray(), current.getRowOffset(), current.getRowLength(),
      current.getFamilyArray(), current.getFamilyOffset(), current.getFamilyLength(),
      current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(),
      current.getValueArray(), current.getValueOffset(), current.getValueLength(),
      current.getTagsArray(), current.getTagsOffset(), current.getTagsLength(),
      current.getTimestamp(), current.getTypeByte(), current.getSequenceId());
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:17,代码来源:PrefixTreeSeeker.java

示例7: add

import org.apache.hadoop.hbase.Cell; //导入方法依赖的package包/类
/**
 * Records a delete marker together with its visibility tags so that later
 * puts are only masked by deletes whose visibility expressions match.
 * <p>
 * Extends the base tracker's bookkeeping: family / family-version deletes
 * update the family stamps, while column deletes replace the currently
 * tracked qualifier and reset the collected visibility-tag lists when the
 * qualifier (or, for version deletes, the timestamp) changes.
 * @param delCell the delete marker cell being added
 */
@Override
public void add(Cell delCell) {
  //Cannot call super.add because need to find if the delete needs to be considered
  long timestamp = delCell.getTimestamp();
  int qualifierOffset = delCell.getQualifierOffset();
  int qualifierLength = delCell.getQualifierLength();
  byte type = delCell.getTypeByte();
  if (type == KeyValue.Type.DeleteFamily.getCode()) {
    hasFamilyStamp = true;
    boolean hasVisTag = extractDeleteCellVisTags(delCell, KeyValue.Type.DeleteFamily);
    // Only an untagged (visibility-free) family delete advances the plain
    // family stamp; tagged ones are matched per-cell via their vis tags.
    if (!hasVisTag && timestamp > familyStamp) {
      familyStamp = timestamp;
    }
    return;
  } else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
    familyVersionStamps.add(timestamp);
    extractDeleteCellVisTags(delCell, KeyValue.Type.DeleteFamilyVersion);
    return;
  }
  // new column, or more general delete type
  if (deleteBuffer != null) {
    if (Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength, delCell.getQualifierArray(),
        qualifierOffset, qualifierLength) != 0) {
      // A case where there are deletes for a column qualifier but there are
      // no corresponding puts for them. Rare case.
      visibilityTagsDeleteColumns = null;
      visiblityTagsDeleteColumnVersion = null;
    } else if (type == KeyValue.Type.Delete.getCode() && (deleteTimestamp != timestamp)) {
      // there is a timestamp change which means we could clear the list
      // when ts is same and the vis tags are different we need to collect
      // them all. Interesting part is that in the normal case of puts if
      // there are 2 cells with same ts and diff vis tags only one of them is
      // returned. Handling with a single List<Tag> would mean that only one
      // of the cell would be considered. Doing this as a precaution.
      // Rare cases.
      visiblityTagsDeleteColumnVersion = null;
    }
  }
  // Track this cell as the current column delete and collect its vis tags.
  deleteBuffer = delCell.getQualifierArray();
  deleteOffset = qualifierOffset;
  deleteLength = qualifierLength;
  deleteType = type;
  deleteTimestamp = timestamp;
  extractDeleteCellVisTags(delCell, KeyValue.Type.codeToType(type));
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:46,代码来源:VisibilityScanDeleteTracker.java


注:本文中的org.apache.hadoop.hbase.Cell.getQualifierArray方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。