

Java Cell.getTimestamp Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.Cell.getTimestamp. If you are wondering what Cell.getTimestamp does, how to call it, or want to see it in working code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.Cell.


The following shows 15 code examples of Cell.getTimestamp, ordered by popularity by default.
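
Before the collected examples, here is a minimal self-contained sketch of the method itself: scan a table and print the version timestamp of every cell. This is an illustrative sketch only, assuming an HBase 1.x client on the classpath; the table name "demo" is a placeholder.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class GetTimestampDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo")); // placeholder table
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        for (Cell cell : result.rawCells()) {
          // getTimestamp() returns the cell's version timestamp,
          // by default milliseconds since the epoch at write time.
          System.out.println(cell.getTimestamp());
        }
      }
    }
  }
}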

Example 1: hashResult

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
public void hashResult(Result result) {
  if (!batchStarted) {
    throw new RuntimeException("Cannot add to batch that has not been started.");
  }
  for (Cell cell : result.rawCells()) {
    int rowLength = cell.getRowLength();
    int familyLength = cell.getFamilyLength();
    int qualifierLength = cell.getQualifierLength();
    int valueLength = cell.getValueLength();
    digest.update(cell.getRowArray(), cell.getRowOffset(), rowLength);
    digest.update(cell.getFamilyArray(), cell.getFamilyOffset(), familyLength);
    digest.update(cell.getQualifierArray(), cell.getQualifierOffset(), qualifierLength);
    long ts = cell.getTimestamp();
    for (int i = 8; i > 0; i--) {
      digest.update((byte) ts);
      ts >>>= 8;
    }
    digest.update(cell.getValueArray(), cell.getValueOffset(), valueLength);
    
    batchSize += rowLength + familyLength + qualifierLength + 8 + valueLength;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: HashTable.java
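
One detail worth noting: the shift loop above feeds the 8-byte timestamp into the digest least-significant byte first, i.e. in little-endian order. A hedged equivalent using ByteBuffer (the helper name is mine, not from HashTable.java):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.security.MessageDigest;

class TimestampDigestHelper {
  // Digest the timestamp in the same little-endian byte order
  // as the manual shift loop in hashResult().
  static void digestTimestamp(MessageDigest digest, long ts) {
    digest.update(ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(ts).array());
  }
}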

Example 2: recoverClusteringResult

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
public static Result recoverClusteringResult(Result raw, byte[] family, byte[] qualifier) {
  if (raw == null) return null;
  byte[][] indexColumn = IndexPutParser.parseIndexRowKey(raw.getRow());
  List<KeyValue> list = new ArrayList<>(raw.listCells().size() + 1);
  for (Cell cell : raw.listCells()) {
    byte[] tag = cell.getTagsArray();
    if (tag != null && tag.length > KeyValue.MAX_TAGS_LENGTH) tag = null;
    KeyValue kv =
        new KeyValue(indexColumn[0], CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
            cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
            CellUtil.cloneValue(cell), tag);
    list.add(kv);
  }
  list.add(new KeyValue(indexColumn[0], family, qualifier, indexColumn[1]));
  Collections.sort(list, KeyValue.COMPARATOR);
  return new Result(list);
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: BaseIndexScanner.java

Example 3: SerializableCell

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
/**
 * Copy data from {@code Cell} instance.
 *
 * @param cell the source Cell whose fields are copied
 */
public SerializableCell( Cell cell ) {
    rowKey = CellUtil.cloneRow(cell);
    family = CellUtil.cloneFamily(cell);
    qualifier = CellUtil.cloneQualifier(cell);
    value = CellUtil.cloneValue(cell);
    timestamp = cell.getTimestamp();
    type = cell.getTypeByte();
}
 
Developer: i-knowledge, Project: hbase-client, Lines: 14, Source: SerializableCell.java

Example 4: isDeleted

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
/**
 * Check if the specified KeyValue buffer has been deleted by a previously
 * seen delete.
 * @param kv the cell to check
 * @param ds the set of deletes already seen for the current row
 * @return true if the specified KeyValue is deleted, false if not
 */
public boolean isDeleted(final Cell kv, final NavigableSet<KeyValue> ds) {
  if (deletes == null || deletes.isEmpty()) return false;
  for (KeyValue d: ds) {
    long kvts = kv.getTimestamp();
    long dts = d.getTimestamp();
    if (CellUtil.isDeleteFamily(d)) {
      if (kvts <= dts) return true;
      continue;
    }
    // Check column
    int ret = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(),
        kv.getQualifierLength(),
      d.getQualifierArray(), d.getQualifierOffset(), d.getQualifierLength());
    if (ret <= -1) {
      // This delete is for an earlier column.
      continue;
    } else if (ret >= 1) {
      // Beyond this kv.
      break;
    }
    // Check Timestamp
    if (kvts > dts) return false;

    // Check Type
    switch (KeyValue.Type.codeToType(d.getTypeByte())) {
      case Delete: return kvts == dts;
      case DeleteColumn: return true;
      default: continue;
    }
  }
  return false;
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: GetClosestRowBeforeTracker.java

Example 5: add

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
/**
 * Add the specified KeyValue to the list of deletes to check against for
 * this row operation.
 * <p>
 * This is called when a Delete is encountered.
 * @param cell - the delete cell
 */
@Override
public void add(Cell cell) {
  long timestamp = cell.getTimestamp();
  int qualifierOffset = cell.getQualifierOffset();
  int qualifierLength = cell.getQualifierLength();
  byte type = cell.getTypeByte();
  if (!hasFamilyStamp || timestamp > familyStamp) {
    if (type == KeyValue.Type.DeleteFamily.getCode()) {
      hasFamilyStamp = true;
      familyStamp = timestamp;
      return;
    } else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
      familyVersionStamps.add(timestamp);
      return;
    }

    if (deleteBuffer != null && type < deleteType) {
      // same column, so ignore less specific delete
      if (Bytes.equals(deleteBuffer, deleteOffset, deleteLength,
          cell.getQualifierArray(), qualifierOffset, qualifierLength)){
        return;
      }
    }
    // new column, or more general delete type
    deleteBuffer = cell.getQualifierArray();
    deleteOffset = qualifierOffset;
    deleteLength = qualifierLength;
    deleteType = type;
    deleteTimestamp = timestamp;
  }
  // missing else is never called.
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: ScanDeleteTracker.java
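
The comparison type < deleteType above works because the KeyValue.Type byte codes order more specific deletes below more general ones. A small sketch that prints the codes (the values noted in the comment are from HBase 1.x):

import org.apache.hadoop.hbase.KeyValue;

public class DeleteTypeCodes {
  public static void main(String[] args) {
    // In HBase 1.x: Minimum=0, Put=4, Delete=8, DeleteFamilyVersion=10,
    // DeleteColumn=12, DeleteFamily=14, Maximum=255. A numerically smaller
    // delete code is the "less specific" delete that add() ignores.
    for (KeyValue.Type t : KeyValue.Type.values()) {
      System.out.println(t + " = " + (t.getCode() & 0xFF));
    }
  }
}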

Example 6: prepareExpectedKVs

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
private void prepareExpectedKVs(long latestDelTS) {
  final List<Cell> filteredKVs = new ArrayList<Cell>();
  for (Cell kv : expectedKVs) {
    if (kv.getTimestamp() > latestDelTS || latestDelTS == -1) {
      filteredKVs.add(kv);
    }
  }
  expectedKVs = filteredKVs;
  Collections.sort(expectedKVs, KeyValue.COMPARATOR);
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: TestSeekOptimizations.java

Example 7: testDelete_CheckTimestampUpdated

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
@Test
public void testDelete_CheckTimestampUpdated() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte[] row1 = Bytes.toBytes("row1");
  byte[] col1 = Bytes.toBytes("col1");
  byte[] col2 = Bytes.toBytes("col2");
  byte[] col3 = Bytes.toBytes("col3");

  // Setting up region
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    // Building checkerList
    List<Cell> kvs = new ArrayList<Cell>();
    kvs.add(new KeyValue(row1, fam1, col1, null));
    kvs.add(new KeyValue(row1, fam1, col2, null));
    kvs.add(new KeyValue(row1, fam1, col3, null));

    NavigableMap<byte[], List<Cell>> deleteMap = new TreeMap<byte[], List<Cell>>(
        Bytes.BYTES_COMPARATOR);
    deleteMap.put(fam1, kvs);
    region.delete(deleteMap, Durability.SYNC_WAL);

    // extract the key values out the memstore:
    // This is kinda hacky, but better than nothing...
    long now = System.currentTimeMillis();
    DefaultMemStore memstore = (DefaultMemStore) ((HStore) region.getStore(fam1)).memstore;
    Cell firstCell = memstore.cellSet.first();
    assertTrue(firstCell.getTimestamp() <= now);
    now = firstCell.getTimestamp();
    for (Cell cell : memstore.cellSet) {
      assertTrue(cell.getTimestamp() <= now);
      now = cell.getTimestamp();
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: TestHRegion.java

Example 8: getMap

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
/**
 * Map of families to all versions of its qualifiers and values.
 * <p>
 * Returns a three level Map of the form:
 * <code>Map<family, Map<qualifier, Map<timestamp, value>>></code>
 * <p>
 * Note: All other map returning methods make use of this map internally.
 * @return map from families to qualifiers to versions
 */
public NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> getMap() {
  if (this.familyMap != null) {
    return this.familyMap;
  }
  if(isEmpty()) {
    return null;
  }
  this.familyMap = new TreeMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>(Bytes.BYTES_COMPARATOR);
  for(Cell kv : this.cells) {
    byte [] family = CellUtil.cloneFamily(kv);
    NavigableMap<byte[], NavigableMap<Long, byte[]>> columnMap =
      familyMap.get(family);
    if(columnMap == null) {
      columnMap = new TreeMap<byte[], NavigableMap<Long, byte[]>>
        (Bytes.BYTES_COMPARATOR);
      familyMap.put(family, columnMap);
    }
    byte [] qualifier = CellUtil.cloneQualifier(kv);
    NavigableMap<Long, byte[]> versionMap = columnMap.get(qualifier);
    if(versionMap == null) {
      versionMap = new TreeMap<Long, byte[]>(new Comparator<Long>() {
        @Override
        public int compare(Long l1, Long l2) {
          return l2.compareTo(l1);
        }
      });
      columnMap.put(qualifier, versionMap);
    }
    Long timestamp = kv.getTimestamp();
    byte [] value = CellUtil.cloneValue(kv);

    versionMap.put(timestamp, value);
  }
  return this.familyMap;
}
 
Developer: fengchen8086, Project: ditb, Lines: 45, Source: Result.java
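
A hedged usage sketch for the map returned above; the helper below (my naming, not part of Result) walks family, then qualifier, then the (timestamp, value) entries. Versions iterate newest first because of the descending Long comparator installed in getMap().

import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

class ResultMapWalker {
  static void dumpAllVersions(Result result) {
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = result.getMap();
    if (map == null) return; // getMap() returns null for an empty Result
    for (Map.Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> family : map.entrySet()) {
      for (Map.Entry<byte[], NavigableMap<Long, byte[]>> column : family.getValue().entrySet()) {
        for (Map.Entry<Long, byte[]> version : column.getValue().entrySet()) {
          System.out.printf("%s:%s @ %d = %s%n",
              Bytes.toString(family.getKey()), Bytes.toString(column.getKey()),
              version.getKey(), Bytes.toString(version.getValue()));
        }
      }
    }
  }
}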

Example 9: filterKeyValue

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
@Override
public ReturnCode filterKeyValue(Cell v) {
  if (this.timestamps.contains(v.getTimestamp())) {
    return ReturnCode.INCLUDE;
  } else if (v.getTimestamp() < minTimeStamp) {
    // Every remaining version of this column is older than the
    // smallest requested timestamp, so skip to the next column.
    return ReturnCode.NEXT_COL;
  }
  return ReturnCode.SKIP;
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: TimestampsFilter.java
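
For context, a minimal client-side sketch of how this filter is typically installed on a Scan. The timestamps are placeholders, and setMaxVersions() is needed so that more than the newest version is even considered:

import java.util.Arrays;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.TimestampsFilter;

class TimestampsFilterUsage {
  static Scan scanExactVersions() {
    Scan scan = new Scan();
    scan.setMaxVersions(); // consider all versions, not only the latest
    // Keep only cells written at exactly these (placeholder) timestamps.
    scan.setFilter(new TimestampsFilter(Arrays.asList(1000L, 2000L)));
    return scan;
  }
}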

Example 10: addAfterRowFamilyQualifier

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
/***************** internal add methods ************************/

  private void addAfterRowFamilyQualifier(Cell cell){
    // timestamps
    timestamps[totalCells] = cell.getTimestamp();
    timestampEncoder.add(cell.getTimestamp());

    // memstore timestamps
    if (includeMvccVersion) {
      mvccVersions[totalCells] = cell.getMvccVersion();
      mvccVersionEncoder.add(cell.getMvccVersion());
      totalUnencodedBytes += WritableUtils.getVIntSize(cell.getMvccVersion());
    }else{
      //must overwrite in case there was a previous version in this array slot
      mvccVersions[totalCells] = 0L;
      if(totalCells == 0){//only need to do this for the first cell added
        mvccVersionEncoder.add(0L);
      }
      //totalUncompressedBytes += 0;//mvccVersion takes zero bytes when disabled
    }

    // types
    typeBytes[totalCells] = cell.getTypeByte();
    cellTypeEncoder.add(cell.getTypeByte());

    // values
    totalValueBytes += cell.getValueLength();
    // double the array each time we run out of space
    values = ArrayUtils.growIfNecessary(values, totalValueBytes, 2 * totalValueBytes);
    CellUtil.copyValueTo(cell, values, valueOffsets[totalCells]);
    if (cell.getValueLength() > maxValueLength) {
      maxValueLength = cell.getValueLength();
    }
    valueOffsets[totalCells + 1] = totalValueBytes;

    // general
    totalUnencodedBytes += KeyValueUtil.length(cell);
    ++totalCells;
  }
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: PrefixTreeEncoder.java

Example 11: getKeyValue

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
/**
 * currently must do deep copy into new array
 */
@Override
public Cell getKeyValue() {
  Cell cell = ptSearcher.current();
  if (cell == null) {
    return null;
  }
  return new ClonedPrefixTreeCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
      cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), cell.getTagsArray(),
      cell.getTagsOffset(), cell.getTagsLength(), cell.getTimestamp(), cell.getTypeByte(),
      cell.getSequenceId());
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: PrefixTreeSeeker.java

Example 12: add

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
@Override
public void add(Cell delCell) {
  //Cannot call super.add because need to find if the delete needs to be considered
  long timestamp = delCell.getTimestamp();
  int qualifierOffset = delCell.getQualifierOffset();
  int qualifierLength = delCell.getQualifierLength();
  byte type = delCell.getTypeByte();
  if (type == KeyValue.Type.DeleteFamily.getCode()) {
    hasFamilyStamp = true;
    boolean hasVisTag = extractDeleteCellVisTags(delCell, KeyValue.Type.DeleteFamily);
    if (!hasVisTag && timestamp > familyStamp) {
      familyStamp = timestamp;
    }
    return;
  } else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
    familyVersionStamps.add(timestamp);
    extractDeleteCellVisTags(delCell, KeyValue.Type.DeleteFamilyVersion);
    return;
  }
  // new column, or more general delete type
  if (deleteBuffer != null) {
    if (Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength, delCell.getQualifierArray(),
        qualifierOffset, qualifierLength) != 0) {
      // A case where there are deletes for a column qualifier but there are
      // no corresponding puts for them. Rare case.
      visibilityTagsDeleteColumns = null;
      visiblityTagsDeleteColumnVersion = null;
    } else if (type == KeyValue.Type.Delete.getCode() && (deleteTimestamp != timestamp)) {
      // there is a timestamp change which means we could clear the list
      // when ts is same and the vis tags are different we need to collect
      // them all. Interesting part is that in the normal case of puts if
      // there are 2 cells with same ts and diff vis tags only one of them is
      // returned. Handling with a single List<Tag> would mean that only one
      // of the cell would be considered. Doing this as a precaution.
      // Rare cases.
      visiblityTagsDeleteColumnVersion = null;
    }
  }
  deleteBuffer = delCell.getQualifierArray();
  deleteOffset = qualifierOffset;
  deleteLength = qualifierLength;
  deleteType = type;
  deleteTimestamp = timestamp;
  extractDeleteCellVisTags(delCell, KeyValue.Type.codeToType(type));
}
 
Developer: fengchen8086, Project: ditb, Lines: 46, Source: VisibilityScanDeleteTracker.java

Example 13: requestSeek

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
/**
 * Pretend we have done a seek but don't do it yet, if possible. The hope is
 * that we find requested columns in more recent files and won't have to seek
 * in older files. Creates a fake key/value with the given row/column and the
 * highest (most recent) possible timestamp we might get from this file. When
 * users of such "lazy scanner" need to know the next KV precisely (e.g. when
 * this scanner is at the top of the heap), they run {@link #enforceSeek()}.
 * Note that this function does guarantee that the current KV of this scanner
 * will be advanced to at least the given KV. Because of this, it does have
 * to do a real seek in cases when the seek timestamp is older than the
 * highest timestamp of the file, e.g. when we are trying to seek to the next
 * row/column and use OLDEST_TIMESTAMP in the seek key.
 */
@Override public boolean requestSeek(Cell kv, boolean forward, boolean useBloom)
    throws IOException {
  if (kv.getFamilyLength() == 0) {
    useBloom = false;
  }

  boolean haveToSeek = true;
  if (useBloom) {
    // check ROWCOL Bloom filter first.
    if (reader.getBloomFilterType() == BloomType.ROWCOL) {
      haveToSeek = reader
          .passesGeneralBloomFilter(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
              kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
    } else if (this.matcher != null && !matcher.hasNullColumnInQuery() && ((
        CellUtil.isDeleteFamily(kv) || CellUtil.isDeleteFamilyVersion(kv)))) {
      // if there is no such delete family kv in the store file,
      // then no need to seek.
      haveToSeek = reader
          .passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
    }
  }

  delayedReseek = forward;
  delayedSeekKV = kv;

  if (haveToSeek) {
    // This row/column might be in this store file (or we did not use the
    // Bloom filter), so we still need to seek.
    realSeekDone = false;
    long maxTimestampInFile = reader.getMaxTimestamp();
    long seekTimestamp = kv.getTimestamp();
    if (seekTimestamp > maxTimestampInFile) {
      // Create a fake key that is not greater than the real next key.
      // (Lower timestamps correspond to higher KVs.)
      // To understand this better, consider that we are asked to seek to
      // a higher timestamp than the max timestamp in this file. We know that
      // the next point when we have to consider this file again is when we
      // pass the max timestamp of this file (with the same row/column).
      setCurrentCell(KeyValueUtil.createFirstOnRowColTS(kv, maxTimestampInFile));
    } else {
      // This will be the case e.g. when we need to seek to the next
      // row/column, and we don't know exactly what they are, so we set the
      // seek key's timestamp to OLDEST_TIMESTAMP to skip the rest of this
      // row/column.
      enforceSeek();
    }
    return cur != null;
  }

  // Multi-column Bloom filter optimization.
  // Create a fake key/value, so that this scanner only bubbles up to the top
  // of the KeyValueHeap in StoreScanner after we scanned this row/column in
  // all other store files. The query matcher will then just skip this fake
  // key/value and the store scanner will progress to the next column. This
  // is obviously not a "real real" seek, but unlike the fake KV earlier in
  // this method, we want this to be propagated to ScanQueryMatcher.
  setCurrentCell(KeyValueUtil.createLastOnRowCol(kv));

  realSeekDone = true;
  return true;
}
 
Developer: fengchen8086, Project: ditb, Lines: 75, Source: StoreFileScanner.java

Example 14: isExpired

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
/**
 * @param cell the cell to check against the TTL boundaries
 * @return true if the cell is expired
 */
public boolean isExpired(final Cell cell) {
  return cell.getTimestamp() < this.oldestUnexpiredTs ||
    HStore.isCellTTLExpired(cell, this.oldestUnexpiredTs, this.now);
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: GetClosestRowBeforeTracker.java
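
In the surrounding HBase code, the oldestUnexpiredTs boundary used above is derived from the column family's TTL, roughly "now minus TTL". A simplified standalone sketch of that arithmetic; the TTL value is an illustrative assumption:

class TtlExpiryCheck {
  // Simplified version of the boundary computation: a cell is expired
  // once its timestamp falls before "now minus the family TTL".
  static boolean isExpired(long cellTimestamp, long ttlMs, long now) {
    long oldestUnexpiredTs = now - ttlMs;
    return cellTimestamp < oldestUnexpiredTs;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long oneDayMs = 24L * 60 * 60 * 1000; // assumed 1-day TTL
    System.out.println(isExpired(now - 2 * oneDayMs, oneDayMs, now)); // prints true
  }
}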

Example 15: updateColumnValue

import org.apache.hadoop.hbase.Cell; // import the class the method depends on
/**
 * Only used by tests. TODO: Remove
 *
 * Given the specs of a column, update it, first by inserting a new record,
 * then removing the old one.  Since there is only 1 KeyValue involved, the memstoreTS
 * will be set to 0, thus ensuring that they instantly appear to anyone. The underlying
 * store will ensure that the insert/delete each are atomic. A scanner/reader will either
 * get the new value, or the old value and all readers will eventually only see the new
 * value after the old was removed.
 *
 * @param row
 * @param family
 * @param qualifier
 * @param newValue
 * @param now
 * @return  Timestamp
 */
@Override
public long updateColumnValue(byte[] row,
                              byte[] family,
                              byte[] qualifier,
                              long newValue,
                              long now) {
  Cell firstCell = KeyValueUtil.createFirstOnRow(row, family, qualifier);
  // Is there a Cell in 'snapshot' with the same TS? If so, upgrade the timestamp a bit.
  SortedSet<Cell> snSs = snapshot.tailSet(firstCell);
  if (!snSs.isEmpty()) {
    Cell snc = snSs.first();
    // is there a matching Cell in the snapshot?
    if (CellUtil.matchingRow(snc, firstCell) && CellUtil.matchingQualifier(snc, firstCell)) {
      if (snc.getTimestamp() == now) {
        // poop,
        now += 1;
      }
    }
  }

  // logic here: the new ts MUST be at least 'now'. But it could be larger if necessary.
  // But the timestamp should also be max(now, mostRecentTsInMemstore)

  // so we cant add the new Cell w/o knowing what's there already, but we also
  // want to take this chance to delete some cells. So two loops (sad)

  SortedSet<Cell> ss = cellSet.tailSet(firstCell);
  for (Cell cell : ss) {
    // if this isnt the row we are interested in, then bail:
    if (!CellUtil.matchingColumn(cell, family, qualifier)
        || !CellUtil.matchingRow(cell, firstCell)) {
      break; // rows dont match, bail.
    }

    // if the qualifier matches and it's a put, just RM it out of the cellSet.
    if (cell.getTypeByte() == KeyValue.Type.Put.getCode() &&
        cell.getTimestamp() > now && CellUtil.matchingQualifier(firstCell, cell)) {
      now = cell.getTimestamp();
    }
  }

  // create or update (upsert) a new Cell with
  // 'now' and a 0 memstoreTS == immediately visible
  List<Cell> cells = new ArrayList<Cell>(1);
  cells.add(new KeyValue(row, family, qualifier, now, Bytes.toBytes(newValue)));
  return upsert(cells, 1L);
}
 
Developer: fengchen8086, Project: ditb, Lines: 65, Source: DefaultMemStore.java


Note: the org.apache.hadoop.hbase.Cell.getTimestamp examples in this article were compiled from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets remain the property of their original authors; consult the corresponding project's license before redistributing or reusing them, and do not repost without permission.