

Java Cell.getQualifierLength Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.hbase.Cell.getQualifierLength. If you are unsure what Cell.getQualifierLength does, how to call it, or would like to see it in context, the curated examples below may help. You can also explore other usage examples of org.apache.hadoop.hbase.Cell.


The following presents 13 code examples of Cell.getQualifierLength, ordered by popularity by default.
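
Before the examples, a minimal usage sketch: Cell.getQualifierLength() is almost always paired with getQualifierArray() and getQualifierOffset(), because a Cell exposes its qualifier as a slice of a shared backing array rather than as a standalone byte[]. The helper below is illustrative only (the class and method names are made up for this sketch):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class QualifierLengthDemo {
  // Copy each column qualifier out of a Result using the array/offset/length
  // accessors, the same pattern the examples below rely on.
  static void dumpQualifiers(Result result) {
    for (Cell cell : result.rawCells()) {
      int qLen = cell.getQualifierLength();
      byte[] qualifier = new byte[qLen];
      System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(),
          qualifier, 0, qLen);
      System.out.println("qualifier=" + Bytes.toString(qualifier) + ", length=" + qLen);
    }
  }
}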

Example 1: hashResult

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
public void hashResult(Result result) {
  if (!batchStarted) {
    throw new RuntimeException("Cannot add to batch that has not been started.");
  }
  for (Cell cell : result.rawCells()) {
    int rowLength = cell.getRowLength();
    int familyLength = cell.getFamilyLength();
    int qualifierLength = cell.getQualifierLength();
    int valueLength = cell.getValueLength();
    digest.update(cell.getRowArray(), cell.getRowOffset(), rowLength);
    digest.update(cell.getFamilyArray(), cell.getFamilyOffset(), familyLength);
    digest.update(cell.getQualifierArray(), cell.getQualifierOffset(), qualifierLength);
    long ts = cell.getTimestamp();
    // mix the 8 timestamp bytes into the digest, least-significant byte first
    for (int i = 8; i > 0; i--) {
      digest.update((byte) ts);
      ts >>>= 8;
    }
    digest.update(cell.getValueArray(), cell.getValueOffset(), valueLength);
    
    batchSize += rowLength + familyLength + qualifierLength + 8 + valueLength;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: HashTable.java
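
hashResult assumes a few instance fields (batchStarted, digest, batchSize) that are not shown above. A minimal hedged sketch of the surrounding state such a hasher might keep; the class name, startBatch method, and the MD5 choice are assumptions for illustration, not the project's actual code:

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class ResultHasherSketch {
  // Hypothetical minimal state matching what hashResult above expects.
  boolean batchStarted = false;
  long batchSize = 0;
  MessageDigest digest;

  void startBatch() throws NoSuchAlgorithmException {
    // assumption: MD5, a common choice for content fingerprints in table-hash tools
    digest = MessageDigest.getInstance("MD5");
    batchStarted = true;
    batchSize = 0;
  }
}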

Example 2: getNewCell

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
private static Cell getNewCell(final byte[] row, final long ts, final Cell cell,
    final Cell oldCell, final byte[] tagBytes) {
  // allocate an empty cell once
  Cell newCell = new KeyValue(row.length, cell.getFamilyLength(), cell.getQualifierLength(), ts,
      KeyValue.Type.Put, oldCell.getValueLength() + cell.getValueLength(),
      tagBytes == null ? 0 : tagBytes.length);
  // copy in row, family, and qualifier
  System.arraycopy(cell.getRowArray(), cell.getRowOffset(), newCell.getRowArray(),
      newCell.getRowOffset(), cell.getRowLength());
  System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), newCell.getFamilyArray(),
      newCell.getFamilyOffset(), cell.getFamilyLength());
  System
      .arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), newCell.getQualifierArray(),
          newCell.getQualifierOffset(), cell.getQualifierLength());
  // copy in the value
  System.arraycopy(oldCell.getValueArray(), oldCell.getValueOffset(), newCell.getValueArray(),
      newCell.getValueOffset(), oldCell.getValueLength());
  System.arraycopy(cell.getValueArray(), cell.getValueOffset(), newCell.getValueArray(),
      newCell.getValueOffset() + oldCell.getValueLength(), cell.getValueLength());
  // Copy in tag data
  if (tagBytes != null) {
    System
        .arraycopy(tagBytes, 0, newCell.getTagsArray(), newCell.getTagsOffset(), tagBytes.length);
  }
  return newCell;
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: HRegion.java

Example 3: getRoughSize

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
static int getRoughSize(final Cell [] cells) {
  int size = 0;
  for (Cell c: cells) {
    size += c.getRowLength() + c.getFamilyLength() + c.getQualifierLength() + c.getValueLength();
    // plus a rough allowance for the timestamp (long) and type (byte) of each cell
    size += Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE;
  }
  return size;
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: CodecPerformance.java

Example 4: scanRow

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
private void scanRow(final Result result, final RowKeyBuilder simpleRowKeyBuilder, final RowKey rowKey,
                     final StatisticType statsType, EventStoreTimeIntervalEnum interval) throws IOException {
    final CellScanner cellScanner = result.cellScanner();
    while (cellScanner.advance()) {
        final Cell cell = cellScanner.current();

        // get the column qualifier
        final byte[] bTimeQualifier = new byte[cell.getQualifierLength()];
        System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), bTimeQualifier, 0,
                cell.getQualifierLength());

        // convert this into a true time, albeit rounded to the column
        // interval granularity
        final long columnIntervalNo = Bytes.toInt(bTimeQualifier);
        final long columnIntervalSize = interval.columnInterval();
        final long columnTimeComponentMillis = columnIntervalNo * columnIntervalSize;
        final long rowKeyPartialTimeMillis = simpleRowKeyBuilder.getPartialTimestamp(rowKey);
        final long fullTimestamp = rowKeyPartialTimeMillis + columnTimeComponentMillis;

        LOGGER.debug("Col: [" + ByteArrayUtils.byteArrayToHex(bTimeQualifier) + "] - ["
                + Bytes.toInt(bTimeQualifier) + "] - [" + fullTimestamp + "] - ["
                + DateUtil.createNormalDateTimeString(fullTimestamp) + "]");

        final byte[] bValue = new byte[cell.getValueLength()];
        System.arraycopy(cell.getValueArray(), cell.getValueOffset(), bValue, 0, cell.getValueLength());

        switch (statsType) {
            case VALUE:
                final ValueCellValue cellValue = new ValueCellValue(bValue);

                LOGGER.debug("Val: " + cellValue);
                break;
            case COUNT:
                LOGGER.debug("Val: " + Bytes.toLong(bValue));
                break;
        }

    }
}
 
Developer: gchq, Project: stroom-stats, Lines: 40, Source: StatisticsTestService.java
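
In HBase releases that ship org.apache.hadoop.hbase.CellUtil (the same client API the examples import Cell from), the two manual System.arraycopy calls in scanRow can be replaced by its clone helpers. A short equivalent sketch, hedged in case the target version differs:

import org.apache.hadoop.hbase.CellUtil;

// Equivalent to the manual qualifier/value copies in scanRow above: each helper
// allocates a new byte[] of length cell.getQualifierLength() (or getValueLength())
// and copies the corresponding slice into it.
final byte[] bTimeQualifier = CellUtil.cloneQualifier(cell);
final byte[] bValue = CellUtil.cloneValue(cell);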

Example 5: add

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/**
 * Add the specified KeyValue to the list of deletes to check against for
 * this row operation.
 * <p>
 * This is called when a Delete is encountered.
 * @param cell - the delete cell
 */
@Override
public void add(Cell cell) {
  long timestamp = cell.getTimestamp();
  int qualifierOffset = cell.getQualifierOffset();
  int qualifierLength = cell.getQualifierLength();
  byte type = cell.getTypeByte();
  if (!hasFamilyStamp || timestamp > familyStamp) {
    if (type == KeyValue.Type.DeleteFamily.getCode()) {
      hasFamilyStamp = true;
      familyStamp = timestamp;
      return;
    } else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
      familyVersionStamps.add(timestamp);
      return;
    }

    if (deleteBuffer != null && type < deleteType) {
      // same column, so ignore less specific delete
      if (Bytes.equals(deleteBuffer, deleteOffset, deleteLength,
          cell.getQualifierArray(), qualifierOffset, qualifierLength)){
        return;
      }
    }
    // new column, or more general delete type
    deleteBuffer = cell.getQualifierArray();
    deleteOffset = qualifierOffset;
    deleteLength = qualifierLength;
    deleteType = type;
    deleteTimestamp = timestamp;
  }
  // missing else is never called.
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: ScanDeleteTracker.java

Example 6: matchingColumn

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
private boolean matchingColumn(Cell keyValue, byte[] family, byte[] qualifier) {
  // Compare against the cell's family slice (offset + length) rather than the whole
  // backing array, which may also contain row, qualifier and value bytes.
  if (!Bytes.equals(family, 0, family.length, keyValue.getFamilyArray(),
      keyValue.getFamilyOffset(), keyValue.getFamilyLength())) {
    return false;
  }
  if (qualifier == null || qualifier.length == 0) {
    return keyValue.getQualifierLength() == 0;
  }
  return Bytes.equals(qualifier, 0, qualifier.length, keyValue.getQualifierArray(),
      keyValue.getQualifierOffset(), keyValue.getQualifierLength());
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: TimeRangeFilter.java

Example 7: filterKeyValue

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
@Override
public ReturnCode filterKeyValue(Cell v) {
  int qualifierLength = v.getQualifierLength();
  if (qualifierLength > 0) {
    if (doCompare(this.compareOp, this.comparator, v.getQualifierArray(),
        v.getQualifierOffset(), qualifierLength)) {
      return ReturnCode.SKIP;
    }
  }
  return ReturnCode.INCLUDE;
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: QualifierFilter.java

Example 8: filterKeyValue

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
@Override
public ReturnCode filterKeyValue(Cell kv) {
  // TODO have a column compare method in Cell
  byte[] buffer = kv.getQualifierArray();
  int qualifierOffset = kv.getQualifierOffset();
  int qualifierLength = kv.getQualifierLength();
  int cmpMin = 1;

  if (this.minColumn != null) {
    cmpMin = Bytes.compareTo(buffer, qualifierOffset, qualifierLength,
        this.minColumn, 0, this.minColumn.length);
  }

  if (cmpMin < 0) {
    return ReturnCode.SEEK_NEXT_USING_HINT;
  }

  if (!this.minColumnInclusive && cmpMin == 0) {
    return ReturnCode.NEXT_COL;
  }

  if (this.maxColumn == null) {
    return ReturnCode.INCLUDE;
  }

  int cmpMax = Bytes.compareTo(buffer, qualifierOffset, qualifierLength,
      this.maxColumn, 0, this.maxColumn.length);

  if (this.maxColumnInclusive && cmpMax <= 0 ||
      !this.maxColumnInclusive && cmpMax < 0) {
    return ReturnCode.INCLUDE;
  }

  return ReturnCode.NEXT_ROW;
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: ColumnRangeFilter.java
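
The filterKeyValue implementation above uses getQualifierLength when comparing each cell's qualifier against the configured minColumn/maxColumn bounds. For context, a brief usage sketch of attaching such a filter to a Scan; the qualifier bounds "q000" and "q100" are made-up values for illustration:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Include only cells whose qualifier lies in ["q000", "q100"); the range check
// is the same qualifier comparison performed in filterKeyValue above.
Scan scan = new Scan();
scan.setFilter(new ColumnRangeFilter(Bytes.toBytes("q000"), true,
    Bytes.toBytes("q100"), false));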

Example 9: getKeyValue

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
/**
 * currently must do deep copy into new array
 */
@Override
public Cell getKeyValue() {
  Cell cell = ptSearcher.current();
  if (cell == null) {
    return null;
  }
  return new ClonedPrefixTreeCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
      cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), cell.getTagsArray(),
      cell.getTagsOffset(), cell.getTagsLength(), cell.getTimestamp(), cell.getTypeByte(),
      cell.getSequenceId());
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: PrefixTreeSeeker.java

Example 10: compareTypeBytes

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
private int compareTypeBytes(Cell key, Cell right) {
  if (key.getFamilyLength() + key.getQualifierLength() == 0
      && key.getTypeByte() == Type.Minimum.getCode()) {
    // left is "bigger", i.e. it appears later in the sorted order
    return 1;
  }
  if (right.getFamilyLength() + right.getQualifierLength() == 0
      && right.getTypeByte() == Type.Minimum.getCode()) {
    return -1;
  }
  return 0;
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: BufferedDataBlockEncoder.java

Example 11: scanUIDTable

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
public void scanUIDTable() throws IOException {
    // TableConfiguration tableConfiguration = getTableConfiguration();
    final HBaseUniqueIdForwardMapTable uidTable = new HBaseUniqueIdForwardMapTable(hBaseConnection);

    // UniqueIdCache uniqueIdCache = getUinqueIdCache(tableConfiguration);

    final Scan scan = new Scan().setMaxVersions(1).addFamily(Bytes.toBytes("i"));

    final Table tableInterface = uidTable.getTable();
    final ResultScanner scanner = tableInterface.getScanner(scan);

    final Writer writerU = Files.newBufferedWriter(new File("UID_U.csv").toPath(), UTF_8);
    final Writer writerV = Files.newBufferedWriter(new File("UID_V.csv").toPath(), UTF_8);

    String line = "";

    LOGGER.info("Dumping contents of UID table");

    for (final Result result : scanner) {
        final byte[] rowKey = result.getRow();
        String colQual;
        String type = "";
        byte[] valueColValue = null;

        final CellScanner cellScanner = result.cellScanner();
        while (cellScanner.advance()) {
            final Cell cell = cellScanner.current();

            // get the column qualifier
            final byte[] bcolQual = new byte[cell.getQualifierLength()];
            System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), bcolQual, 0,
                    cell.getQualifierLength());

            colQual = Bytes.toString(bcolQual);

            final byte[] bCellVal = new byte[cell.getValueLength()];
            System.arraycopy(cell.getValueArray(), cell.getValueOffset(), bCellVal, 0, cell.getValueLength());

            if (colQual.equals("t")) {
                // type column
                type = Bytes.toString(bCellVal);

            } else if (colQual.equals("v")) {
                // value column
                valueColValue = bCellVal;
            }
        }

        if (type.equals("U")) {
            // row key is a UID, so convert it to hex and convert the value
            // column value to a string

            line = type + "," + ByteArrayUtils.byteArrayToHex(rowKey) + "," + Bytes.toString(valueColValue);

            writerU.write(line + "\n");

        } else {
            line = type + "," + Bytes.toString(rowKey) + "," + ByteArrayUtils.byteArrayToHex(valueColValue);

            writerV.write(line + "\n");
        }

    }

    scanner.close();
    HBaseTable.closeTable(tableInterface);

    writerU.close();
    writerV.close();

}
 
Developer: gchq, Project: stroom-stats, Lines: 72, Source: StatisticsTestService.java

Example 12: add

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
@Override
public void add(Cell delCell) {
  //Cannot call super.add because need to find if the delete needs to be considered
  long timestamp = delCell.getTimestamp();
  int qualifierOffset = delCell.getQualifierOffset();
  int qualifierLength = delCell.getQualifierLength();
  byte type = delCell.getTypeByte();
  if (type == KeyValue.Type.DeleteFamily.getCode()) {
    hasFamilyStamp = true;
    boolean hasVisTag = extractDeleteCellVisTags(delCell, KeyValue.Type.DeleteFamily);
    if (!hasVisTag && timestamp > familyStamp) {
      familyStamp = timestamp;
    }
    return;
  } else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
    familyVersionStamps.add(timestamp);
    extractDeleteCellVisTags(delCell, KeyValue.Type.DeleteFamilyVersion);
    return;
  }
  // new column, or more general delete type
  if (deleteBuffer != null) {
    if (Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength, delCell.getQualifierArray(),
        qualifierOffset, qualifierLength) != 0) {
      // A case where there are deletes for a column qualifier but there are
      // no corresponding puts for them. Rare case.
      visibilityTagsDeleteColumns = null;
      visiblityTagsDeleteColumnVersion = null;
    } else if (type == KeyValue.Type.Delete.getCode() && (deleteTimestamp != timestamp)) {
      // there is a timestamp change which means we could clear the list
      // when ts is same and the vis tags are different we need to collect
      // them all. Interesting part is that in the normal case of puts if
      // there are 2 cells with same ts and diff vis tags only one of them is
      // returned. Handling with a single List<Tag> would mean that only one
      // of the cell would be considered. Doing this as a precaution.
      // Rare cases.
      visiblityTagsDeleteColumnVersion = null;
    }
  }
  deleteBuffer = delCell.getQualifierArray();
  deleteOffset = qualifierOffset;
  deleteLength = qualifierLength;
  deleteType = type;
  deleteTimestamp = timestamp;
  extractDeleteCellVisTags(delCell, KeyValue.Type.codeToType(type));
}
 
Developer: fengchen8086, Project: ditb, Lines: 46, Source: VisibilityScanDeleteTracker.java

Example 13: writeKeyExcludingCommon

import org.apache.hadoop.hbase.Cell; // import the package/class the method depends on
private void writeKeyExcludingCommon(Cell cell, int commonPrefix, DataOutputStream out)
    throws IOException {
  short rLen = cell.getRowLength();
  if (commonPrefix < rLen + KeyValue.ROW_LENGTH_SIZE) {
    // Previous and current rows are different. Need to write the differing part followed by
    // cf,q,ts and type
    CellUtil.writeRowKeyExcludingCommon(cell, rLen, commonPrefix, out);
    byte fLen = cell.getFamilyLength();
    out.writeByte(fLen);
    out.write(cell.getFamilyArray(), cell.getFamilyOffset(), fLen);
    out.write(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
    out.writeLong(cell.getTimestamp());
    out.writeByte(cell.getTypeByte());
  } else {
    // The full row key part is common. CF part will be common for sure as we deal with Cells in
    // same family. Just need write the differing part in q, ts and type
    commonPrefix = commonPrefix - (rLen + KeyValue.ROW_LENGTH_SIZE)
        - (cell.getFamilyLength() + KeyValue.FAMILY_LENGTH_SIZE);
    int qLen = cell.getQualifierLength();
    int commonQualPrefix = Math.min(commonPrefix, qLen);
    int qualPartLenToWrite = qLen - commonQualPrefix;
    if (qualPartLenToWrite > 0) {
      out.write(cell.getQualifierArray(), cell.getQualifierOffset() + commonQualPrefix,
          qualPartLenToWrite);
    }
    commonPrefix -= commonQualPrefix;
    // Common part in TS also?
    if (commonPrefix > 0) {
      int commonTimestampPrefix = Math.min(commonPrefix, KeyValue.TIMESTAMP_SIZE);
      if (commonTimestampPrefix < KeyValue.TIMESTAMP_SIZE) {
        byte[] curTsBuf = Bytes.toBytes(cell.getTimestamp());
        out.write(curTsBuf, commonTimestampPrefix, KeyValue.TIMESTAMP_SIZE
            - commonTimestampPrefix);
      }
      commonPrefix -= commonTimestampPrefix;
      if (commonPrefix == 0) {
        out.writeByte(cell.getTypeByte());
      }
    } else {
      out.writeLong(cell.getTimestamp());
      out.writeByte(cell.getTypeByte());
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 45, Source: PrefixKeyDeltaEncoder.java


Note: The org.apache.hadoop.hbase.Cell.getQualifierLength method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright of the source code. Consult each project's license before redistributing or reusing the code; do not republish without permission.