

Java Cell.getValueLength Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.Cell.getValueLength. If you are wondering what exactly Cell.getValueLength does, how to use it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.Cell.


The following presents 15 code examples of the Cell.getValueLength method, sorted by popularity by default.
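
All of the examples share one access pattern: a Cell does not expose a standalone value byte[]; its value is a slice of a shared backing array, addressed by getValueArray(), getValueOffset(), and getValueLength(). Here is a minimal sketch of the common ways to read such a slice (the class and method names are ours, chosen for illustration; CellUtil.cloneValue is part of the HBase API):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class CellValueAccess {

  // Manual copy: allocate exactly getValueLength() bytes, then copy the slice out.
  static byte[] copyValue(Cell cell) {
    byte[] value = new byte[cell.getValueLength()];
    System.arraycopy(cell.getValueArray(), cell.getValueOffset(), value, 0,
        cell.getValueLength());
    return value;
  }

  // In-place decode: Bytes.toString can read the slice without an intermediate copy.
  static String valueAsString(Cell cell) {
    return Bytes.toString(cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength());
  }

  // Library shortcut: CellUtil.cloneValue performs the same copy as copyValue above.
  static byte[] cloneValueViaUtil(Cell cell) {
    return CellUtil.cloneValue(cell);
  }
}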

Example 1: getRegionServer

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
/**
 * Returns the {@link ServerName} from catalog table {@link Result}
 * where the region is transitioning. It should be the same as
 * {@link HRegionInfo#getServerName(Result)} if the server is at OPEN state.
 * @param r Result to pull the transitioning server name from
 * @return A ServerName instance, or {@link HRegionInfo#getServerName(Result)}
 * if the necessary fields are not found or are empty.
 */
static ServerName getRegionServer(final Result r, int replicaId) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) {
    RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
    if (locations != null) {
      HRegionLocation location = locations.getRegionLocation(replicaId);
      if (location != null) {
        return location.getServerName();
      }
    }
    return null;
  }
  return ServerName.parseServerName(Bytes.toString(cell.getValueArray(),
    cell.getValueOffset(), cell.getValueLength()));
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: RegionStateStore.java

Example 2: hashResult

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
public void hashResult(Result result) {
  if (!batchStarted) {
    throw new RuntimeException("Cannot add to batch that has not been started.");
  }
  for (Cell cell : result.rawCells()) {
    int rowLength = cell.getRowLength();
    int familyLength = cell.getFamilyLength();
    int qualifierLength = cell.getQualifierLength();
    int valueLength = cell.getValueLength();
    digest.update(cell.getRowArray(), cell.getRowOffset(), rowLength);
    digest.update(cell.getFamilyArray(), cell.getFamilyOffset(), familyLength);
    digest.update(cell.getQualifierArray(), cell.getQualifierOffset(), qualifierLength);
    long ts = cell.getTimestamp();
    for (int i = 8; i > 0; i--) {
      digest.update((byte) ts);
      ts >>>= 8;
    }
    digest.update(cell.getValueArray(), cell.getValueOffset(), valueLength);
    
    batchSize += rowLength + familyLength + qualifierLength + 8 + valueLength;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: HashTable.java
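
Example 2 feeds each cell's row, family, qualifier, an 8-byte timestamp (least-significant byte first), and value into a MessageDigest, using the offset/length accessors to avoid any copying. Below is a self-contained sketch of the same serialization for a single cell, assuming an MD5 digest as in HBase's HashTable; the class and method names are ours:

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class CellHashSketch {

  static byte[] hashCell(Cell cell) throws NoSuchAlgorithmException {
    MessageDigest digest = MessageDigest.getInstance("MD5");
    digest.update(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
    digest.update(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
    digest.update(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
    // Same timestamp serialization as hashResult: one byte at a time, LSB first.
    long ts = cell.getTimestamp();
    for (int i = 8; i > 0; i--) {
      digest.update((byte) ts);
      ts >>>= 8;
    }
    digest.update(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    return digest.digest();
  }

  public static void main(String[] args) throws NoSuchAlgorithmException {
    Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), 1L, Bytes.toBytes("value"));
    System.out.println(Bytes.toStringBinary(hashCell(cell)));
  }
}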

Example 3: getNewCell

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
private static Cell getNewCell(final byte[] row, final long ts, final Cell cell,
    final Cell oldCell, final byte[] tagBytes) {
  // allocate an empty cell once
  Cell newCell = new KeyValue(row.length, cell.getFamilyLength(), cell.getQualifierLength(), ts,
      KeyValue.Type.Put, oldCell.getValueLength() + cell.getValueLength(),
      tagBytes == null ? 0 : tagBytes.length);
  // copy in row, family, and qualifier
  System.arraycopy(cell.getRowArray(), cell.getRowOffset(), newCell.getRowArray(),
      newCell.getRowOffset(), cell.getRowLength());
  System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), newCell.getFamilyArray(),
      newCell.getFamilyOffset(), cell.getFamilyLength());
  System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(),
      newCell.getQualifierArray(), newCell.getQualifierOffset(), cell.getQualifierLength());
  // copy in the value
  System.arraycopy(oldCell.getValueArray(), oldCell.getValueOffset(), newCell.getValueArray(),
      newCell.getValueOffset(), oldCell.getValueLength());
  System.arraycopy(cell.getValueArray(), cell.getValueOffset(), newCell.getValueArray(),
      newCell.getValueOffset() + oldCell.getValueLength(), cell.getValueLength());
  // Copy in tag data
  if (tagBytes != null) {
    System.arraycopy(tagBytes, 0, newCell.getTagsArray(), newCell.getTagsOffset(),
        tagBytes.length);
  }
  return newCell;
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: HRegion.java
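
This helper appears in HRegion's append path: the new cell's value region is sized to oldCell.getValueLength() + cell.getValueLength(), and the old and new values are copied in back to back. Here is a hedged sketch of the client-side call that exercises this concatenation; the table, row, and column names are hypothetical:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection();
         Table table = connection.getTable(TableName.valueOf("t1"))) {
      Append append = new Append(Bytes.toBytes("row1"));
      // Server side, the existing value and this one are concatenated into one cell.
      append.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
      Result result = table.append(append);
      System.out.println("new value: " + Bytes.toString(result.value()));
    }
  }
}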

Example 4: encode

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
@Override
public int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
    throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();

  out.writeInt(klength);
  out.writeInt(vlength);
  CellUtil.writeFlatKey(cell, out);
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int encodedKvSize = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  // Write the additional tag into the stream
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    out.writeShort(tagsLength);
    if (tagsLength > 0) {
      out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
    }
    encodedKvSize += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    WritableUtils.writeVLong(out, cell.getSequenceId());
    encodedKvSize += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return encodedKvSize;
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: NoOpDataBlockEncoder.java
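
The layout written above is plain: an int key length, an int value length, the flat key bytes, then the value bytes (plus optional tags and an mvcc vlong when enabled), so it can be read back with an ordinary DataInputStream. A minimal decoding sketch for the no-tags, no-mvcc case (the class and method names are ours):

import java.io.DataInputStream;
import java.io.IOException;

public class PlainKeyValueReader {

  // Reads one entry written as: int keyLength, int valueLength, key bytes, value bytes.
  static byte[] readValue(DataInputStream in) throws IOException {
    int klength = in.readInt();
    int vlength = in.readInt();
    byte[] key = new byte[klength];
    in.readFully(key);                // flat key: row, family, qualifier, ts, type
    byte[] value = new byte[vlength]; // exactly cell.getValueLength() bytes
    in.readFully(value);
    return value;
  }
}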

Example 5: getRoughSize

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
static int getRoughSize(final Cell [] cells) {
  int size = 0;
  for (Cell c: cells) {
    size += c.getRowLength() + c.getFamilyLength() + c.getQualifierLength() + c.getValueLength();
    size += Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE;
  }
  return size;
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: CodecPerformance.java

Example 6: internalEncode

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
    DataOutputStream out) throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();

  out.writeInt(klength);
  out.writeInt(vlength);
  CellUtil.writeFlatKey(cell, out);
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  // Write the additional tag into the stream
  if (encodingContext.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    out.writeShort(tagsLength);
    if (tagsLength > 0) {
      out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
    }
    size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingContext.getHFileContext().isIncludesMvcc()) {
    WritableUtils.writeVLong(out, cell.getSequenceId());
    size += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return size;
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: CopyKeyDataBlockEncoder.java

Example 7: scanRow

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
private void scanRow(final Result result, final RowKeyBuilder simpleRowKeyBuilder, final RowKey rowKey,
                     final StatisticType statsType, EventStoreTimeIntervalEnum interval) throws IOException {
    final CellScanner cellScanner = result.cellScanner();
    while (cellScanner.advance()) {
        final Cell cell = cellScanner.current();

        // get the column qualifier
        final byte[] bTimeQualifier = new byte[cell.getQualifierLength()];
        System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), bTimeQualifier, 0,
                cell.getQualifierLength());

        // convert this into a true time, albeit rounded to the column
        // interval granularity
        final long columnIntervalNo = Bytes.toInt(bTimeQualifier);
        final long columnIntervalSize = interval.columnInterval();
        final long columnTimeComponentMillis = columnIntervalNo * columnIntervalSize;
        final long rowKeyPartialTimeMillis = simpleRowKeyBuilder.getPartialTimestamp(rowKey);
        final long fullTimestamp = rowKeyPartialTimeMillis + columnTimeComponentMillis;

        LOGGER.debug("Col: [" + ByteArrayUtils.byteArrayToHex(bTimeQualifier) + "] - ["
                + Bytes.toInt(bTimeQualifier) + "] - [" + fullTimestamp + "] - ["
                + DateUtil.createNormalDateTimeString(fullTimestamp) + "]");

        final byte[] bValue = new byte[cell.getValueLength()];
        System.arraycopy(cell.getValueArray(), cell.getValueOffset(), bValue, 0, cell.getValueLength());

        switch (statsType) {
            case VALUE:
                final ValueCellValue cellValue = new ValueCellValue(bValue);

                LOGGER.debug("Val: " + cellValue);
                break;
            case COUNT:
                LOGGER.debug("Val: " + Bytes.toLong(bValue));
                break;
        }

    }
}
 
Developer: gchq, Project: stroom-stats, Lines: 40, Source: StatisticsTestService.java
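
The timestamp reconstruction in Example 7 is plain fixed-point arithmetic: the row key stores a coarse partial timestamp and the column qualifier stores the interval number within that bucket. A stripped-down sketch with hypothetical values, replacing the stroom-stats helper classes with plain longs:

import org.apache.hadoop.hbase.util.Bytes;

public class TimestampReconstruction {
  public static void main(String[] args) {
    long rowKeyPartialTimeMillis = 1_609_459_200_000L; // hypothetical: row bucket start
    long columnIntervalSizeMillis = 60_000L;           // hypothetical: one-minute columns
    byte[] bTimeQualifier = Bytes.toBytes(42);         // qualifier as written by the store

    long columnIntervalNo = Bytes.toInt(bTimeQualifier);
    long fullTimestamp = rowKeyPartialTimeMillis + columnIntervalNo * columnIntervalSizeMillis;
    System.out.println("fullTimestamp = " + fullTimestamp); // bucket start + 42 minutes
  }
}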

Example 8: getRegionState

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
/**
 * Pull the region state from a catalog table {@link Result}.
 * @param r Result to pull the region state from
 * @return the region state, or OPEN if there's no value written.
 */
static State getRegionState(final Result r, int replicaId) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) return State.OPEN;
  return State.valueOf(Bytes.toString(cell.getValueArray(),
    cell.getValueOffset(), cell.getValueLength()));
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: RegionStateStore.java

Example 9: getLongValue

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
/**
 * @return the long value stored in the passed-in Cell
 * @throws DoNotRetryIOException
 */
private static long getLongValue(final Cell cell) throws DoNotRetryIOException {
  int len = cell.getValueLength();
  if (len != Bytes.SIZEOF_LONG) {
    // throw DoNotRetryIOException instead of IllegalArgumentException
    throw new DoNotRetryIOException("Field is not a long, it's " + len + " bytes wide");
  }
  return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), len);
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: HRegion.java

Example 10: getValue

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
@Override
public Double getValue(byte[] colFamily, byte[] colQualifier, Cell c)
    throws IOException {
  if (c == null || c.getValueLength() != Bytes.SIZEOF_DOUBLE)
    return null;
  return Bytes.toDouble(c.getValueArray(), c.getValueOffset());
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: DoubleColumnInterpreter.java

Example 11: getValue

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
public Long getValue(byte[] colFamily, byte[] colQualifier, Cell kv)
    throws IOException {
  if (kv == null || kv.getValueLength() != Bytes.SIZEOF_LONG)
    return null;
  return Bytes.toLong(kv.getValueArray(), kv.getValueOffset());
}
 
Developer: fengchen8086, Project: ditb, Lines: 7, Source: LongColumnInterpreter.java
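
Interpreters like those in Examples 10 and 11 plug into HBase's aggregation coprocessor: because getValue returns null when the cell's value is not exactly SIZEOF_LONG bytes wide, malformed cells are skipped rather than corrupting the aggregate. A hedged usage sketch, assuming the AggregateImplementation coprocessor is loaded on the (hypothetical) table and column:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class LongAggregationSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    AggregationClient aggregationClient = new AggregationClient(conf);
    Scan scan = new Scan();
    // Restrict the scan to the single column whose values are 8-byte longs.
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"));
    Long sum = aggregationClient.sum(TableName.valueOf("metrics"),
        new LongColumnInterpreter(), scan);
    System.out.println("sum = " + sum);
    aggregationClient.close();
  }
}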

Example 12: addAfterRowFamilyQualifier

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
/***************** internal add methods ************************/

private void addAfterRowFamilyQualifier(Cell cell) {
  // timestamps
  timestamps[totalCells] = cell.getTimestamp();
  timestampEncoder.add(cell.getTimestamp());

  // memstore timestamps
  if (includeMvccVersion) {
    mvccVersions[totalCells] = cell.getMvccVersion();
    mvccVersionEncoder.add(cell.getMvccVersion());
    totalUnencodedBytes += WritableUtils.getVIntSize(cell.getMvccVersion());
  } else {
    // must overwrite in case there was a previous version in this array slot
    mvccVersions[totalCells] = 0L;
    if (totalCells == 0) { // only need to do this for the first cell added
      mvccVersionEncoder.add(0L);
    }
    // totalUncompressedBytes += 0; // mvccVersion takes zero bytes when disabled
  }

  // types
  typeBytes[totalCells] = cell.getTypeByte();
  cellTypeEncoder.add(cell.getTypeByte());

  // values
  totalValueBytes += cell.getValueLength();
  // double the array each time we run out of space
  values = ArrayUtils.growIfNecessary(values, totalValueBytes, 2 * totalValueBytes);
  CellUtil.copyValueTo(cell, values, valueOffsets[totalCells]);
  if (cell.getValueLength() > maxValueLength) {
    maxValueLength = cell.getValueLength();
  }
  valueOffsets[totalCells + 1] = totalValueBytes;

  // general
  totalUnencodedBytes += KeyValueUtil.length(cell);
  ++totalCells;
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: PrefixTreeEncoder.java

Example 13: getKeyValue

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
/**
 * currently must do deep copy into new array
 */
@Override
public Cell getKeyValue() {
  Cell cell = ptSearcher.current();
  if (cell == null) {
    return null;
  }
  return new ClonedPrefixTreeCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
      cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), cell.getTagsArray(),
      cell.getTagsOffset(), cell.getTagsLength(), cell.getTimestamp(), cell.getTypeByte(),
      cell.getSequenceId());
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: PrefixTreeSeeker.java

Example 14: internalEncode

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
    DataOutputStream out) throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();
  EncodingState state = encodingContext.getEncodingState();
  if (state.prevCell == null) {
    // copy the key, there is no common prefix with none
    ByteBufferUtils.putCompressedInt(out, klength);
    ByteBufferUtils.putCompressedInt(out, vlength);
    ByteBufferUtils.putCompressedInt(out, 0);
    CellUtil.writeFlatKey(cell, out);
  } else {
    // find a common prefix and skip it
    int common = CellUtil.findCommonPrefixInFlatKey(cell, state.prevCell, true, true);
    ByteBufferUtils.putCompressedInt(out, klength - common);
    ByteBufferUtils.putCompressedInt(out, vlength);
    ByteBufferUtils.putCompressedInt(out, common);
    writeKeyExcludingCommon(cell, common, out);
  }
  // Write the value part
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  size += afterEncodingKeyValue(cell, out, encodingContext);
  state.prevCell = cell;
  return size;
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: PrefixKeyDeltaEncoder.java

Example 15: scanUIDTable

import org.apache.hadoop.hbase.Cell; // import the class this method depends on
public void scanUIDTable() throws IOException {
    // TableConfiguration tableConfiguration = getTableConfiguration();
    final HBaseUniqueIdForwardMapTable uidTable = new HBaseUniqueIdForwardMapTable(hBaseConnection);

    // UniqueIdCache uniqueIdCache = getUinqueIdCache(tableConfiguration);

    final Scan scan = new Scan().setMaxVersions(1).addFamily(Bytes.toBytes("i"));

    final Table tableInterface = uidTable.getTable();
    final ResultScanner scanner = tableInterface.getScanner(scan);

    final Writer writerU = Files.newBufferedWriter(new File("UID_U.csv").toPath(), UTF_8);
    final Writer writerV = Files.newBufferedWriter(new File("UID_V.csv").toPath(), UTF_8);

    String line = "";

    LOGGER.info("Dumping contents of UID table");

    for (final Result result : scanner) {
        final byte[] rowKey = result.getRow();
        String colQual;
        String type = "";
        byte[] valueColValue = null;

        final CellScanner cellScanner = result.cellScanner();
        while (cellScanner.advance()) {
            final Cell cell = cellScanner.current();

            // get the column qualifier
            final byte[] bcolQual = new byte[cell.getQualifierLength()];
            System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), bcolQual, 0,
                    cell.getQualifierLength());

            colQual = Bytes.toString(bcolQual);

            final byte[] bCellVal = new byte[cell.getValueLength()];
            System.arraycopy(cell.getValueArray(), cell.getValueOffset(), bCellVal, 0, cell.getValueLength());

            if (colQual.equals("t")) {
                // type column
                type = Bytes.toString(bCellVal);

            } else if (colQual.equals("v")) {
                // value column
                valueColValue = bCellVal;
            }
        }

        if (type.equals("U")) {
            // row key is a UID, so convert it to hex and convert the
            // value column's value to a string

            line = type + "," + ByteArrayUtils.byteArrayToHex(rowKey) + "," + Bytes.toString(valueColValue);

            writerU.write(line + "\n");

        } else {
            line = type + "," + Bytes.toString(rowKey) + "," + ByteArrayUtils.byteArrayToHex(valueColValue);

            writerV.write(line + "\n");
        }

    }

    scanner.close();
    HBaseTable.closeTable(tableInterface);

    writerU.close();
    writerV.close();

}
 
Developer: gchq, Project: stroom-stats, Lines: 72, Source: StatisticsTestService.java


Note: The org.apache.hadoop.hbase.Cell.getValueLength method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.