

Java Cell.getFamilyLength Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.hbase.Cell.getFamilyLength method in Java. If you are wondering what Cell.getFamilyLength does, how to call it, or what real uses of it look like, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.Cell.


The sections below present 8 code examples of the Cell.getFamilyLength method, sorted by popularity by default.
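Before the examples: Cell.getFamilyLength() returns the length of a cell's column-family bytes and is almost always used together with getFamilyArray() and getFamilyOffset(), which point into the cell's backing array. The short sketch below (assuming an HBase 1.x-style Cell API; the class and method names in the sketch are illustrative, not from the examples) shows the two common ways to read a cell's family.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative helper class, not taken from the examples below.
public class FamilyLengthSketch {

  // Read the family without copying: the family bytes live in the backing array
  // starting at getFamilyOffset(), and getFamilyLength() gives their length.
  static String familyAsString(Cell cell) {
    return Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
  }

  // CellUtil.cloneFamily performs the same offset/length arithmetic and returns a copy.
  static byte[] familyCopy(Cell cell) {
    return CellUtil.cloneFamily(cell);
  }
}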

Example 1: hashResult

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
public void hashResult(Result result) {
  if (!batchStarted) {
    throw new RuntimeException("Cannot add to batch that has not been started.");
  }
  for (Cell cell : result.rawCells()) {
    int rowLength = cell.getRowLength();
    int familyLength = cell.getFamilyLength();
    int qualifierLength = cell.getQualifierLength();
    int valueLength = cell.getValueLength();
    digest.update(cell.getRowArray(), cell.getRowOffset(), rowLength);
    digest.update(cell.getFamilyArray(), cell.getFamilyOffset(), familyLength);
    digest.update(cell.getQualifierArray(), cell.getQualifierOffset(), qualifierLength);
    long ts = cell.getTimestamp();
    // fold the 8-byte timestamp into the digest one byte at a time, low byte first
    for (int i = 8; i > 0; i--) {
      digest.update((byte) ts);
      ts >>>= 8;
    }
    digest.update(cell.getValueArray(), cell.getValueOffset(), valueLength);
    
    batchSize += rowLength + familyLength + qualifierLength + 8 + valueLength;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: HashTable.java

Example 2: getNewCell

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
private static Cell getNewCell(final byte[] row, final long ts, final Cell cell,
    final Cell oldCell, final byte[] tagBytes) {
  // allocate an empty cell once
  Cell newCell = new KeyValue(row.length, cell.getFamilyLength(), cell.getQualifierLength(), ts,
      KeyValue.Type.Put, oldCell.getValueLength() + cell.getValueLength(),
      tagBytes == null ? 0 : tagBytes.length);
  // copy in row, family, and qualifier
  System.arraycopy(cell.getRowArray(), cell.getRowOffset(), newCell.getRowArray(),
      newCell.getRowOffset(), cell.getRowLength());
  System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), newCell.getFamilyArray(),
      newCell.getFamilyOffset(), cell.getFamilyLength());
  System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(),
      newCell.getQualifierArray(), newCell.getQualifierOffset(), cell.getQualifierLength());
  // copy in the value
  System.arraycopy(oldCell.getValueArray(), oldCell.getValueOffset(), newCell.getValueArray(),
      newCell.getValueOffset(), oldCell.getValueLength());
  System.arraycopy(cell.getValueArray(), cell.getValueOffset(), newCell.getValueArray(),
      newCell.getValueOffset() + oldCell.getValueLength(), cell.getValueLength());
  // Copy in tag data
  if (tagBytes != null) {
    System.arraycopy(tagBytes, 0, newCell.getTagsArray(), newCell.getTagsOffset(),
        tagBytes.length);
  }
  return newCell;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 27, Source: HRegion.java

Example 3: getRoughSize

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
static int getRoughSize(final Cell [] cells) {
  int size = 0;
  for (Cell c: cells) {
    size += c.getRowLength() + c.getFamilyLength() + c.getQualifierLength() + c.getValueLength();
    // plus the timestamp (long) and the type byte
    size += Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE;
  }
  return size;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 9, Source: CodecPerformance.java

Example 4: filterKeyValue

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
@Override
public ReturnCode filterKeyValue(Cell v) {
  int familyLength = v.getFamilyLength();
  if (familyLength > 0) {
    if (doCompare(this.compareOp, this.comparator, v.getFamilyArray(),
        v.getFamilyOffset(), familyLength)) {
      return ReturnCode.NEXT_ROW;
    }
  }
  return ReturnCode.INCLUDE;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 12, Source: FamilyFilter.java
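For context (not part of the snippet above): a FamilyFilter whose filterKeyValue is shown in Example 4 is normally attached to a Scan on the client side. The sketch below assumes the HBase 1.x client API; the family name "cf" is only a placeholder.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyFilterScanSketch {
  // Build a Scan that keeps only cells whose family equals "cf" (placeholder name).
  // On the server side, the filter compares each cell's family bytes via
  // getFamilyArray()/getFamilyOffset()/getFamilyLength(), as in filterKeyValue above.
  static Scan familyOnlyScan() {
    Scan scan = new Scan();
    scan.setFilter(new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("cf"))));
    return scan;
  }
}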

Example 5: getKeyValue

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
/**
 * currently must do deep copy into new array
 */
@Override
public Cell getKeyValue() {
  Cell cell = ptSearcher.current();
  if (cell == null) {
    return null;
  }
  return new ClonedPrefixTreeCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
      cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), cell.getTagsArray(),
      cell.getTagsOffset(), cell.getTagsLength(), cell.getTimestamp(), cell.getTypeByte(),
      cell.getSequenceId());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 17, Source: PrefixTreeSeeker.java

Example 6: compareTypeBytes

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
private int compareTypeBytes(Cell key, Cell right) {
  if (key.getFamilyLength() + key.getQualifierLength() == 0
      && key.getTypeByte() == Type.Minimum.getCode()) {
    // left is "bigger", i.e. it appears later in the sorted order
    return 1;
  }
  if (right.getFamilyLength() + right.getQualifierLength() == 0
      && right.getTypeByte() == Type.Minimum.getCode()) {
    return -1;
  }
  return 0;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: BufferedDataBlockEncoder.java

Example 7: requestSeek

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
/**
 * Pretend we have done a seek but don't do it yet, if possible. The hope is
 * that we find requested columns in more recent files and won't have to seek
 * in older files. Creates a fake key/value with the given row/column and the
 * highest (most recent) possible timestamp we might get from this file. When
 * users of such "lazy scanner" need to know the next KV precisely (e.g. when
 * this scanner is at the top of the heap), they run {@link #enforceSeek()}.
 * Note that this function does guarantee that the current KV of this scanner
 * will be advanced to at least the given KV. Because of this, it does have
 * to do a real seek in cases when the seek timestamp is older than the
 * highest timestamp of the file, e.g. when we are trying to seek to the next
 * row/column and use OLDEST_TIMESTAMP in the seek key.
 */
@Override public boolean requestSeek(Cell kv, boolean forward, boolean useBloom)
    throws IOException {
  if (kv.getFamilyLength() == 0) {
    useBloom = false;
  }

  boolean haveToSeek = true;
  if (useBloom) {
    // check ROWCOL Bloom filter first.
    if (reader.getBloomFilterType() == BloomType.ROWCOL) {
      haveToSeek = reader
          .passesGeneralBloomFilter(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
              kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
    } else if (this.matcher != null && !matcher.hasNullColumnInQuery() && ((
        CellUtil.isDeleteFamily(kv) || CellUtil.isDeleteFamilyVersion(kv)))) {
      // if there is no such delete family kv in the store file,
      // then no need to seek.
      haveToSeek = reader
          .passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
    }
  }

  delayedReseek = forward;
  delayedSeekKV = kv;

  if (haveToSeek) {
    // This row/column might be in this store file (or we did not use the
    // Bloom filter), so we still need to seek.
    realSeekDone = false;
    long maxTimestampInFile = reader.getMaxTimestamp();
    long seekTimestamp = kv.getTimestamp();
    if (seekTimestamp > maxTimestampInFile) {
      // Create a fake key that is not greater than the real next key.
      // (Lower timestamps correspond to higher KVs.)
      // To understand this better, consider that we are asked to seek to
      // a higher timestamp than the max timestamp in this file. We know that
      // the next point when we have to consider this file again is when we
      // pass the max timestamp of this file (with the same row/column).
      setCurrentCell(KeyValueUtil.createFirstOnRowColTS(kv, maxTimestampInFile));
    } else {
      // This will be the case e.g. when we need to seek to the next
      // row/column, and we don't know exactly what they are, so we set the
      // seek key's timestamp to OLDEST_TIMESTAMP to skip the rest of this
      // row/column.
      enforceSeek();
    }
    return cur != null;
  }

  // Multi-column Bloom filter optimization.
  // Create a fake key/value, so that this scanner only bubbles up to the top
  // of the KeyValueHeap in StoreScanner after we scanned this row/column in
  // all other store files. The query matcher will then just skip this fake
  // key/value and the store scanner will progress to the next column. This
  // is obviously not a "real real" seek, but unlike the fake KV earlier in
  // this method, we want this to be propagated to ScanQueryMatcher.
  setCurrentCell(KeyValueUtil.createLastOnRowCol(kv));

  realSeekDone = true;
  return true;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 75, Source: StoreFileScanner.java

Example 8: writeKeyExcludingCommon

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
private void writeKeyExcludingCommon(Cell cell, int commonPrefix, DataOutputStream out)
    throws IOException {
  short rLen = cell.getRowLength();
  if (commonPrefix < rLen + KeyValue.ROW_LENGTH_SIZE) {
    // Previous and current rows are different. Need to write the differing part followed by
    // cf,q,ts and type
    CellUtil.writeRowKeyExcludingCommon(cell, rLen, commonPrefix, out);
    byte fLen = cell.getFamilyLength();
    out.writeByte(fLen);
    out.write(cell.getFamilyArray(), cell.getFamilyOffset(), fLen);
    out.write(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
    out.writeLong(cell.getTimestamp());
    out.writeByte(cell.getTypeByte());
  } else {
    // The full row key part is common. CF part will be common for sure as we deal with Cells in
    // same family. Just need write the differing part in q, ts and type
    commonPrefix = commonPrefix - (rLen + KeyValue.ROW_LENGTH_SIZE)
        - (cell.getFamilyLength() + KeyValue.FAMILY_LENGTH_SIZE);
    int qLen = cell.getQualifierLength();
    int commonQualPrefix = Math.min(commonPrefix, qLen);
    int qualPartLenToWrite = qLen - commonQualPrefix;
    if (qualPartLenToWrite > 0) {
      out.write(cell.getQualifierArray(), cell.getQualifierOffset() + commonQualPrefix,
          qualPartLenToWrite);
    }
    commonPrefix -= commonQualPrefix;
    // Common part in TS also?
    if (commonPrefix > 0) {
      int commonTimestampPrefix = Math.min(commonPrefix, KeyValue.TIMESTAMP_SIZE);
      if (commonTimestampPrefix < KeyValue.TIMESTAMP_SIZE) {
        byte[] curTsBuf = Bytes.toBytes(cell.getTimestamp());
        out.write(curTsBuf, commonTimestampPrefix, KeyValue.TIMESTAMP_SIZE
            - commonTimestampPrefix);
      }
      commonPrefix -= commonTimestampPrefix;
      if (commonPrefix == 0) {
        out.writeByte(cell.getTypeByte());
      }
    } else {
      out.writeLong(cell.getTimestamp());
      out.writeByte(cell.getTypeByte());
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 45, Source: PrefixKeyDeltaEncoder.java
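As a complement to Example 8 (the write side of the prefix-delta encoding), the helper below sketches how a commonPrefix length between the previous and the current serialized key could be computed before writeKeyExcludingCommon is called. It is illustrative only and is not taken from PrefixKeyDeltaEncoder.

// Illustrative helper, not part of PrefixKeyDeltaEncoder.
public class CommonPrefixSketch {
  // Length of the shared byte prefix between two serialized keys.
  static int commonPrefix(byte[] left, int loff, int llen, byte[] right, int roff, int rlen) {
    int max = Math.min(llen, rlen);
    int common = 0;
    while (common < max && left[loff + common] == right[roff + common]) {
      common++;
    }
    return common;
  }
}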


Note: The org.apache.hadoop.hbase.Cell.getFamilyLength method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce this article without permission.