

Java Cell.getRowLength Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.Cell.getRowLength. If you have been struggling with questions such as: What exactly does Cell.getRowLength do? How is it called? Where can I find examples of its use? Then the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.Cell.


The following presents 10 code examples of the Cell.getRowLength method, sorted by popularity by default.
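Before the examples, here is a minimal sketch of our own (not taken from the projects below; the class name RowKeyExtractor is hypothetical) showing the usual zero-copy access pattern: getRowLength() is only meaningful together with getRowArray() and getRowOffset(), because a Cell's row bytes live at an offset inside a shared backing array.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.util.Bytes;

public class RowKeyExtractor {
  /** Copies the row key of a cell into a standalone byte array. */
  public static byte[] copyRow(Cell cell) {
    short rowLength = cell.getRowLength();
    byte[] row = new byte[rowLength];
    System.arraycopy(cell.getRowArray(), cell.getRowOffset(), row, 0, rowLength);
    return row;
  }

  /** Renders the row key as a printable string, e.g. for logging. */
  public static String rowToString(Cell cell) {
    return Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
  }
}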

Example 1: hashResult

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
public void hashResult(Result result) {
  if (!batchStarted) {
    throw new RuntimeException("Cannot add to batch that has not been started.");
  }
  for (Cell cell : result.rawCells()) {
    int rowLength = cell.getRowLength();
    int familyLength = cell.getFamilyLength();
    int qualifierLength = cell.getQualifierLength();
    int valueLength = cell.getValueLength();
    digest.update(cell.getRowArray(), cell.getRowOffset(), rowLength);
    digest.update(cell.getFamilyArray(), cell.getFamilyOffset(), familyLength);
    digest.update(cell.getQualifierArray(), cell.getQualifierOffset(), qualifierLength);
    long ts = cell.getTimestamp();
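    // mix the 8 timestamp bytes into the digest, least-significant byte first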
    for (int i = 8; i > 0; i--) {
      digest.update((byte) ts);
      ts >>>= 8;
    }
    digest.update(cell.getValueArray(), cell.getValueOffset(), valueLength);
    
    batchSize += rowLength + familyLength + qualifierLength + 8 + valueLength;
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 23, Source: HashTable.java

Example 2: getRoughSize

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
static int getRoughSize(final Cell [] cells) {
  int size = 0;
  for (Cell c: cells) {
    size += c.getRowLength() + c.getFamilyLength() + c.getQualifierLength() + c.getValueLength();
    size += Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE;
  }
  return size;
}
 
Author: fengchen8086, Project: ditb, Lines: 9, Source: CodecPerformance.java
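The Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE term above accounts for each cell's 8-byte timestamp and 1-byte type. As a hedged usage sketch (assuming getRoughSize is visible to the caller, e.g. declared in the same class), the helper pairs naturally with Result.rawCells():

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;

// Hypothetical caller: estimates the rough payload size of a Get/Scan result.
static int roughResultSize(Result result) {
  Cell[] cells = result.rawCells();  // may be null for an empty Result
  return (cells == null) ? 0 : getRoughSize(cells);
}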

Example 3: filter

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
@Override
protected boolean filter(Context context, Cell cell) {
  // TODO: Can I do a better compare than this copying out key?
  byte [] row = new byte [cell.getRowLength()];
  System.arraycopy(cell.getRowArray(), cell.getRowOffset(), row, 0, cell.getRowLength());
  boolean b = this.keysToFind.contains(row);
  if (b) {
    String keyStr = Bytes.toStringBinary(row);
    try {
      LOG.info("Found cell=" + cell + " , walKey=" + context.getCurrentKey());
    } catch (IOException|InterruptedException e) {
      LOG.warn(e);
    }
    if (rows.addAndGet(1) < MISSING_ROWS_TO_LOG) {
      context.getCounter(FOUND_GROUP_KEY, keyStr).increment(1);
    }
    context.getCounter(FOUND_GROUP_KEY, "CELL_WITH_MISSING_ROW").increment(1);
  }
  return b;
}
 
Author: fengchen8086, Project: ditb, Lines: 21, Source: IntegrationTestBigLinkedList.java

Example 4: compareToCurrentToken

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
/**
 * Compare only the bytes within the window of the current token.
 * @param key the cell to compare against
 * @return -1 if key is less than (before) this token, 0 if equal, and 1 if key is after it
 */
protected int compareToCurrentToken(Cell key) {
  int startIndex = rowLength - currentRowNode.getTokenLength();
  int endIndexExclusive = startIndex + currentRowNode.getTokenLength();
  for (int i = startIndex; i < endIndexExclusive; ++i) {
    if (i >= key.getRowLength()) {// key was shorter, so it's first
      return -1;
    }
    byte keyByte = CellUtil.getRowByte(key, i);
    byte thisByte = rowBuffer[i];
    if (keyByte == thisByte) {
      continue;
    }
    return UnsignedBytes.compare(keyByte, thisByte);
  }
  if (!currentRowNode.hasOccurrences() && rowLength >= key.getRowLength()) { // key was shorter
    return -1;
  }
  return 0;
}
 
Author: fengchen8086, Project: ditb, Lines: 25, Source: PrefixTreeArraySearcher.java

Example 5: resetScannerStack

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
protected void resetScannerStack(Cell lastTopKey) throws IOException {
  if (heap != null) {
    throw new RuntimeException("StoreScanner.reseek run on an existing heap!");
  }

  /* When we have the scan object, should we not pass it to getScanners()
   * to get a limited set of scanners? We did so in the constructor and we
   * could have done it now by storing the scan object from the constructor */
  List<KeyValueScanner> scanners = getScannersNoCompaction();

  // Seek all scanners to the initial key
  seekScanners(scanners, lastTopKey, false, parallelSeekEnabled);

  // Combine all seeked scanners with a heap
  resetKVHeap(scanners, store.getComparator());

  // Reset the state of the Query Matcher and set to top row.
  // Only reset and call setRow if the row changes; avoids confusing the
  // query matcher if scanning intra-row.
  Cell kv = heap.peek();
  if (kv == null) {
    kv = lastTopKey;
  }
  byte[] row = kv.getRowArray();
  int offset = kv.getRowOffset();
  short length = kv.getRowLength();
  if ((matcher.row == null) || !Bytes
      .equals(row, offset, length, matcher.row, matcher.rowOffset, matcher.rowLength)) {
    this.countPerRow = 0;
    matcher.reset();
    matcher.setRow(row, offset, length);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 34, Source: StoreScanner.java

Example 6: rowMatchesAfterCurrentPosition

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
/**
 * Compare this.row to key.row, starting at the current rowLength.
 * @param key Cell being searched for
 * @return true if the row buffer contents match key.row
 */
protected boolean rowMatchesAfterCurrentPosition(Cell key) {
  if (!currentRowNode.hasOccurrences()) {
    return false;
  }
  int thatRowLength = key.getRowLength();
  if (rowLength != thatRowLength) {
    return false;
  }
  return true;
}
 
Author: fengchen8086, Project: ditb, Lines: 16, Source: PrefixTreeArraySearcher.java

Example 7: getKeyValue

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
/**
 * Currently must do a deep copy into a new array.
 */
@Override
public Cell getKeyValue() {
  Cell cell = ptSearcher.current();
  if (cell == null) {
    return null;
  }
  return new ClonedPrefixTreeCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
      cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), cell.getTagsArray(),
      cell.getTagsOffset(), cell.getTagsLength(), cell.getTimestamp(), cell.getTypeByte(),
      cell.getSequenceId());
}
 
Author: fengchen8086, Project: ditb, Lines: 17, Source: PrefixTreeSeeker.java

Example 8: appendGeneralBloomfilter

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
private void appendGeneralBloomfilter(final Cell cell) throws IOException {
  if (this.generalBloomFilterWriter != null) {
    // only add to the bloom filter on a new, unique key
    boolean newKey = true;
    if (this.lastCell != null) {
      switch (bloomType) {
      case ROW:
        newKey = !kvComparator.matchingRows(cell, lastCell);
        break;
      case ROWCOL:
        newKey = !kvComparator.matchingRowColumn(cell, lastCell);
        break;
      case NONE:
        newKey = false;
        break;
      default:
        throw new IOException(
            "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL expected)");
      }
    }
    if (newKey) {
      /*
       * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.png
       * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + TimeStamp
       * Two types of filtering: 1. Row = Row  2. RowCol = Row + Qualifier
       */
      byte[] bloomKey;
      int bloomKeyOffset, bloomKeyLen;

      switch (bloomType) {
      case ROW:
        bloomKey = cell.getRowArray();
        bloomKeyOffset = cell.getRowOffset();
        bloomKeyLen = cell.getRowLength();
        break;
      case ROWCOL:
        // merge(row, qualifier)
        // TODO: could save one buffer copy in case of compound Bloom
        // filters when this involves creating a KeyValue
        bloomKey = generalBloomFilterWriter
            .createBloomKey(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
                cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
        bloomKeyOffset = 0;
        bloomKeyLen = bloomKey.length;
        break;
      default:
        throw new IOException(
            "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL expected)");
      }
      generalBloomFilterWriter.add(bloomKey, bloomKeyOffset, bloomKeyLen);
      if (lastBloomKey != null && generalBloomFilterWriter.getComparator()
          .compareFlatKey(bloomKey, bloomKeyOffset, bloomKeyLen, lastBloomKey,
              lastBloomKeyOffset, lastBloomKeyLen) <= 0) {
        throw new IOException("Non-increasing Bloom keys: " + Bytes
            .toStringBinary(bloomKey, bloomKeyOffset, bloomKeyLen) + " after " + Bytes
            .toStringBinary(lastBloomKey, lastBloomKeyOffset, lastBloomKeyLen));
      }
      lastBloomKey = bloomKey;
      lastBloomKeyOffset = bloomKeyOffset;
      lastBloomKeyLen = bloomKeyLen;
      this.lastCell = cell;
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 65, Source: StoreFile.java

Example 9: positionAtOrAfter

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
/**
 * Identical workflow to positionAtOrBefore, but split into separate methods to avoid ~10 extra
 * if-statements. Priority is on readability and debuggability.
 */
@Override
public CellScannerPosition positionAtOrAfter(Cell key) {
  reInitFirstNode();
  int fanIndex = -1;

  while(true){
    //detect row mismatch.  break loop if mismatch
    int currentNodeDepth = rowLength;
    int rowTokenComparison = compareToCurrentToken(key);
    if(rowTokenComparison != 0){
      return fixRowTokenMissForward(rowTokenComparison);
    }

    //exact row found, move on to qualifier & ts
    if(rowMatchesAfterCurrentPosition(key)){
      return positionAtQualifierTimestamp(key, false);
    }

    //detect dead end (no fan to descend into)
    if(!currentRowNode.hasFan()){
      if(hasOccurrences()){
        if (rowLength < key.getRowLength()) {
          nextRow();
        } else {
          populateFirstNonRowFields();
        }
        return CellScannerPosition.AFTER;
      }else{
        //TODO i don't think this case is exercised by any tests
        return fixRowFanMissForward(0);
      }
    }

    //keep hunting for the rest of the row
    byte searchForByte = CellUtil.getRowByte(key, currentNodeDepth);
    fanIndex = currentRowNode.whichFanNode(searchForByte);
    if(fanIndex < 0){//no matching row.  return early
      int insertionPoint = -fanIndex - 1;
      return fixRowFanMissForward(insertionPoint);
    }
    //found a match, so dig deeper into the tree
    followFan(fanIndex);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 49, Source: PrefixTreeArraySearcher.java

Example 10: writeKeyExcludingCommon

import org.apache.hadoop.hbase.Cell; // import the package/class this method depends on
private void writeKeyExcludingCommon(Cell cell, int commonPrefix, DataOutputStream out)
    throws IOException {
  short rLen = cell.getRowLength();
  if (commonPrefix < rLen + KeyValue.ROW_LENGTH_SIZE) {
    // Previous and current rows are different. Need to write the differing part followed by
    // cf,q,ts and type
    CellUtil.writeRowKeyExcludingCommon(cell, rLen, commonPrefix, out);
    byte fLen = cell.getFamilyLength();
    out.writeByte(fLen);
    out.write(cell.getFamilyArray(), cell.getFamilyOffset(), fLen);
    out.write(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
    out.writeLong(cell.getTimestamp());
    out.writeByte(cell.getTypeByte());
  } else {
    // The full row key part is common. CF part will be common for sure as we deal with Cells in
    // same family. Just need write the differing part in q, ts and type
    commonPrefix = commonPrefix - (rLen + KeyValue.ROW_LENGTH_SIZE)
        - (cell.getFamilyLength() + KeyValue.FAMILY_LENGTH_SIZE);
    int qLen = cell.getQualifierLength();
    int commonQualPrefix = Math.min(commonPrefix, qLen);
    int qualPartLenToWrite = qLen - commonQualPrefix;
    if (qualPartLenToWrite > 0) {
      out.write(cell.getQualifierArray(), cell.getQualifierOffset() + commonQualPrefix,
          qualPartLenToWrite);
    }
    commonPrefix -= commonQualPrefix;
    // Common part in TS also?
    if (commonPrefix > 0) {
      int commonTimestampPrefix = Math.min(commonPrefix, KeyValue.TIMESTAMP_SIZE);
      if (commonTimestampPrefix < KeyValue.TIMESTAMP_SIZE) {
        byte[] curTsBuf = Bytes.toBytes(cell.getTimestamp());
        out.write(curTsBuf, commonTimestampPrefix, KeyValue.TIMESTAMP_SIZE
            - commonTimestampPrefix);
      }
      commonPrefix -= commonTimestampPrefix;
      if (commonPrefix == 0) {
        out.writeByte(cell.getTypeByte());
      }
    } else {
      out.writeLong(cell.getTimestamp());
      out.writeByte(cell.getTypeByte());
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 45, Source: PrefixKeyDeltaEncoder.java


Note: The org.apache.hadoop.hbase.Cell.getRowLength method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the programming community, and copyright in the source code remains with the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.