

Java Cell Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.Cell. If you have been wondering what the Cell class does, how to use it, or where to find working examples, the curated snippets below should help.


The Cell class belongs to the org.apache.hadoop.hbase package. Fifteen code examples of the class are shown below, ordered by popularity.
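Before the numbered examples, here is a minimal sketch of how a Cell is typically consumed, assuming the HBase 1.x-era client API that the projects below use; dumpRow and its parameters are illustrative names, not taken from any of the examples.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch: fetch one row and print each Cell's coordinates.
// CellUtil.clone* copies the relevant slice of the backing array into a
// fresh byte[], which is the safe way to keep Cell data after the RPC.
void dumpRow(Table table, byte[] rowKey) throws IOException {
  Result result = table.get(new Get(rowKey));
  for (Cell cell : result.rawCells()) {
    System.out.println(Bytes.toStringBinary(CellUtil.cloneRow(cell))
        + "/" + Bytes.toString(CellUtil.cloneFamily(cell))
        + ":" + Bytes.toString(CellUtil.cloneQualifier(cell))
        + " @ " + cell.getTimestamp()
        + " = " + Bytes.toStringBinary(CellUtil.cloneValue(cell)));
  }
}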

Example 1: getScanResult

import org.apache.hadoop.hbase.Cell; // import the required package/class
private List<Cell> getScanResult(byte[] startRow, byte[] stopRow, HTable ht) throws IOException {
  Scan scan = new Scan();
  scan.setMaxVersions();
  if(!Bytes.toString(startRow).isEmpty()) {
    scan.setStartRow(startRow);
  }
  if(!Bytes.toString(stopRow).isEmpty()) {
    scan.setStopRow(stopRow);
  }
  ResultScanner scanner = ht.getScanner(scan);
  List<Cell> kvList = new ArrayList<Cell>();
  Result r;
  while ((r = scanner.next()) != null) {
    for (Cell kv : r.listCells()) {
      kvList.add(kv);
    }
  }
  return kvList;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestMultiRowRangeFilter.java
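One caveat with this example: the ResultScanner is never closed, which is tolerable in a short-lived test but leaks server-side scanner resources otherwise. A sketch of the same loop with try-with-resources (ResultScanner is both Closeable and Iterable<Result>):

try (ResultScanner scanner = ht.getScanner(scan)) {
  for (Result r : scanner) {
    kvList.addAll(r.listCells());
  }
}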

Example 2: encode

import org.apache.hadoop.hbase.Cell; // import the required package/class
@Override
public int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
    throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();

  out.writeInt(klength);
  out.writeInt(vlength);
  CellUtil.writeFlatKey(cell, out);
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int encodedKvSize = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  // Write the additional tag into the stream
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    out.writeShort(tagsLength);
    if (tagsLength > 0) {
      out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
    }
    encodedKvSize += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    WritableUtils.writeVLong(out, cell.getSequenceId());
    encodedKvSize += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return encodedKvSize;
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: NoOpDataBlockEncoder.java
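The stream written above is exactly the flat KeyValue serialization: key length, value length, key bytes, value bytes, optionally followed by a tags block and a vlong mvcc sequence id. To make the layout concrete, here is a hypothetical decoder that mirrors encode() field for field; decode(), includesTags, and includesMvcc are invented names for this sketch and are not part of NoOpDataBlockEncoder.

// Sketch only: reads back one cell in the layout encode() wrote.
private Cell decode(DataInputStream in, boolean includesTags, boolean includesMvcc)
    throws IOException {
  int klength = in.readInt();
  int vlength = in.readInt();
  byte[] keyAndValue = new byte[klength + vlength];
  in.readFully(keyAndValue);
  byte[] tags = null;
  if (includesTags) {
    short tagsLength = in.readShort();
    tags = new byte[tagsLength];
    in.readFully(tags);
  }
  // Reassemble the flat KeyValue byte layout: keylen, vallen, key, value
  // and, when present, a 2-byte tags length followed by the tags.
  int size = 2 * Bytes.SIZEOF_INT + keyAndValue.length
      + (tags == null ? 0 : Bytes.SIZEOF_SHORT + tags.length);
  byte[] backing = new byte[size];
  int pos = Bytes.putInt(backing, 0, klength);
  pos = Bytes.putInt(backing, pos, vlength);
  pos = Bytes.putBytes(backing, pos, keyAndValue, 0, keyAndValue.length);
  if (tags != null) {
    pos = Bytes.putShort(backing, pos, (short) tags.length);
    Bytes.putBytes(backing, pos, tags, 0, tags.length);
  }
  KeyValue kv = new KeyValue(backing, 0, backing.length);
  if (includesMvcc) {
    kv.setSequenceId(WritableUtils.readVLong(in));
  }
  return kv;
}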

Example 3: testMultiRowRangeFilterWithoutRangeOverlap

import org.apache.hadoop.hbase.Cell; // import the required package/class
@Test
public void testMultiRowRangeFilterWithoutRangeOverlap() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithoutRangeOverlap");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);

  Scan scan = new Scan();
  scan.setMaxVersions();

  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false));

  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  LOG.info("found " + resultsSize + " results");
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(20), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), ht);
  List<Cell> results3 = getScanResult(Bytes.toBytes(60), Bytes.toBytes(70), ht);

  assertEquals(results1.size() + results2.size() + results3.size(), resultsSize);

  ht.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestMultiRowRangeFilter.java

Example 4: verifyExpectedCounts

import org.apache.hadoop.hbase.Cell; // import the required package/class
private void verifyExpectedCounts(Table table, Scan scan, int expectedRowCount,
    int expectedCellCount) throws Exception {
  ResultScanner scanner = table.getScanner(scan);

  int rowCount = 0;
  int cellCount = 0;
  Result r = null;
  while ((r = scanner.next()) != null) {
    rowCount++;
    for (Cell c : r.rawCells()) {
      cellCount++;
    }
  }

  assertTrue("Expected row count: " + expectedRowCount + " Actual row count: " + rowCount,
    expectedRowCount == rowCount);
  assertTrue("Expected cell count: " + expectedCellCount + " Actual cell count: " + cellCount,
    expectedCellCount == cellCount);
  scanner.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: TestScannersFromClientSide.java

Example 5: testDeletedRowThenGoodRow

import org.apache.hadoop.hbase.Cell; // import the required package/class
public void testDeletedRowThenGoodRow() throws IOException {
  KeyValue [] kvs = new KeyValue [] {
      KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"),
      KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care"),
      KeyValueTestUtil.create("R2", "cf", "a", 20, KeyValue.Type.Put, "dont-care")
  };
  List<KeyValueScanner> scanners = scanFixture(kvs);
  Scan scanSpec = new Scan(Bytes.toBytes("R1"));
  StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType,
      getCols("a"), scanners);

  List<Cell> results = new ArrayList<Cell>();
  assertEquals(true, scan.next(results));
  assertEquals(0, results.size());

  assertEquals(true, scan.next(results));
  assertEquals(1, results.size());
  assertEquals(kvs[2], results.get(0));

  assertEquals(false, scan.next(results));
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestStoreScanner.java
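A note on the two leading assertions: scan.next(results) returns true while more rows remain, even when the current row contributes nothing. The Delete on R1 at timestamp 1 masks the Put on R1 at the same timestamp, so the first call yields an empty list; the second call returns the single surviving Put on R2.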

Example 6: printKV

import org.apache.hadoop.hbase.Cell; // import the required package/class
private void printKV(Cell keyValue) {
  StringBuilder sb = new StringBuilder();
  sb.append("rowkey=" + Bytes.toStringBinary(keyValue.getRow()));
  int i = 0;
  int[] arr = MDUtils.bitwiseUnzip(keyValue.getRow(), 3);
  sb.append(", indicating=");
  for (Map.Entry<byte[], TreeSet<byte[]>> entry : relation.getIndexFamilyMap().entrySet()) {
    for (byte[] qualifier : entry.getValue()) {
      sb.append("[").append(Bytes.toString(entry.getKey())).append(":")
          .append(Bytes.toString(qualifier)).append("]=").append(arr[i]).append(",");
      ++i;
    }
  }
  sb.append(", rawRowkey=" + Bytes.toInt(keyValue.getQualifier()));
  System.out.println(sb.toString());
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: HFileTest.java

Example 7: getExistingLabelsWithAuths

import org.apache.hadoop.hbase.Cell; // import the required package/class
protected List<List<Cell>> getExistingLabelsWithAuths() throws IOException {
  Scan scan = new Scan();
  RegionScanner scanner = labelsRegion.getScanner(scan);
  List<List<Cell>> existingLabels = new ArrayList<List<Cell>>();
  try {
    while (true) {
      List<Cell> cells = new ArrayList<Cell>();
      scanner.next(cells);
      if (cells.isEmpty()) {
        break;
      }
      existingLabels.add(cells);
    }
  } finally {
    scanner.close();
  }
  return existingLabels;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: DefaultVisibilityLabelServiceImpl.java

Example 8: walkForwardInSingleRow

import org.apache.hadoop.hbase.Cell; // import the required package/class
private boolean walkForwardInSingleRow(final HFileScanner scanner, final KeyValue firstOnRow,
    final GetClosestRowBeforeTracker state) throws IOException {
  boolean foundCandidate = false;
  do {
    Cell kv = scanner.getKeyValue();
    // If we are not in the row, skip.
    if (this.comparator.compareRows(kv, firstOnRow) < 0) continue;
    // Did we go beyond the target row? If so break.
    if (state.isTooFar(kv, firstOnRow)) break;
    if (state.isExpired(kv)) {
      continue;
    }
    // If we added something, this row is a contender. break.
    if (state.handle(kv)) {
      foundCandidate = true;
      break;
    }
  } while (scanner.next());
  return foundCandidate;
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: HStore.java

Example 9: addSize

import org.apache.hadoop.hbase.Cell; // import the required package/class
/**
 * Method to account for the size of retained cells and retained data blocks.
 * @return an object that represents the last referenced block from this response.
 */
Object addSize(RpcCallContext context, Result r, Object lastBlock) {
  if (context != null && !r.isEmpty()) {
    for (Cell c : r.rawCells()) {
      context.incrementResponseCellSize(CellUtil.estimatedHeapSizeOf(c));
      // We're using the last block being the same as the current block as
      // a proxy for pointing to a new block. This won't be exact.
      // If there are multiple gets that bounce back and forth
      // Then it's possible that this will over count the size of
      // referenced blocks. However it's better to over count and
      // use two RPC's than to OOME the RegionServer.
      byte[] valueArray = c.getValueArray();
      if (valueArray != lastBlock) {
        context.incrementResponseBlockSize(valueArray.length);
        lastBlock = valueArray;
      }
    }
  }
  return lastBlock;
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: RSRpcServices.java

Example 10: append

import org.apache.hadoop.hbase.Cell; // import the required package/class
@Override
public long append(HTableDescriptor htd, HRegionInfo info, WALKey key, WALEdit edits,
                   boolean inMemstore) {
  if (!this.listeners.isEmpty()) {
    final long start = System.nanoTime();
    long len = 0;
    for (Cell cell : edits.getCells()) {
      len += CellUtil.estimatedSerializedSizeOf(cell);
    }
    final long elapsed = (System.nanoTime() - start) / 1000000L;
    for (WALActionsListener listener : this.listeners) {
      listener.postAppend(len, elapsed);
    }
  }
  return -1;
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: DisabledWALProvider.java

Example 11: write

import org.apache.hadoop.hbase.Cell; // import the required package/class
@Override
public void write(DataOutput out) throws IOException {
  LOG.warn("WALEdit is being serialized to writable - only expected in test code");
  out.writeInt(VERSION_2);
  out.writeInt(cells.size());
  // We interleave the two lists for code simplicity
  for (Cell cell : cells) {
    // This is not used in any of the core code flows so it is just fine to convert to KV
    KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
    if (compressionContext != null) {
      KeyValueCompression.writeKV(out, kv, compressionContext);
    } else {
      KeyValue.write(kv, out);
    }
  }
  if (scopes == null) {
    out.writeInt(0);
  } else {
    out.writeInt(scopes.size());
    for (byte[] key : scopes.keySet()) {
      Bytes.writeByteArray(out, key);
      out.writeInt(scopes.get(key));
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: WALEdit.java

Example 12: preGetOp

import org.apache.hadoop.hbase.Cell; // import the required package/class
@Override
public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
                     final Get get, final List<Cell> results) throws IOException {

  if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
    CountDownLatch latch = cdl.get();
    try {
      if (sleepTime.get() > 0) {
        LOG.info("Sleeping for " + sleepTime.get() + " ms");
        Thread.sleep(sleepTime.get());
      } else if (latch.getCount() > 0) {
        LOG.info("Waiting for the counterCountDownLatch");
        latch.await(2, TimeUnit.MINUTES); // To help the tests to finish.
        if (latch.getCount() > 0) {
          throw new RuntimeException("Can't wait more");
        }
      }
    } catch (InterruptedException e1) {
      LOG.error(e1);
    }
  } else {
    LOG.info("We're not the primary replicas.");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestReplicaWithCluster.java

Example 13: getWALEntryfilter

import org.apache.hadoop.hbase.Cell; // import the required package/class
@Override
public WALEntryFilter getWALEntryfilter() {
  return new ChainWALEntryFilter(super.getWALEntryfilter(), new WALEntryFilter() {
    @Override
    public Entry filter(Entry entry) {
      ArrayList<Cell> cells = entry.getEdit().getCells();
      int size = cells.size();
      for (int i = size-1; i >= 0; i--) {
        Cell cell = cells.get(i);
        if (!Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
          row, 0, row.length)) {
          cells.remove(i);
        }
      }
      return entry;
    }
  });
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestReplicationEndpoint.java

Example 14: scanRow

import org.apache.hadoop.hbase.Cell; // import the required package/class
private void scanRow(final Result result, final RowKeyBuilder simpleRowKeyBuilder, final RowKey rowKey,
                     final StatisticType statsType, EventStoreTimeIntervalEnum interval) throws IOException {
    final CellScanner cellScanner = result.cellScanner();
    while (cellScanner.advance()) {
        final Cell cell = cellScanner.current();

        // get the column qualifier
        final byte[] bTimeQualifier = new byte[cell.getQualifierLength()];
        System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), bTimeQualifier, 0,
                cell.getQualifierLength());

        // convert this into a true time, albeit rounded to the column
        // interval granularity
        final long columnIntervalNo = Bytes.toInt(bTimeQualifier);
        final long columnIntervalSize = interval.columnInterval();
        final long columnTimeComponentMillis = columnIntervalNo * columnIntervalSize;
        final long rowKeyPartialTimeMillis = simpleRowKeyBuilder.getPartialTimestamp(rowKey);
        final long fullTimestamp = rowKeyPartialTimeMillis + columnTimeComponentMillis;

        LOGGER.debug("Col: [" + ByteArrayUtils.byteArrayToHex(bTimeQualifier) + "] - ["
                + Bytes.toInt(bTimeQualifier) + "] - [" + fullTimestamp + "] - ["
                + DateUtil.createNormalDateTimeString(fullTimestamp) + "]");

        final byte[] bValue = new byte[cell.getValueLength()];
        System.arraycopy(cell.getValueArray(), cell.getValueOffset(), bValue, 0, cell.getValueLength());

        switch (statsType) {
            case VALUE:
                final ValueCellValue cellValue = new ValueCellValue(bValue);

                LOGGER.debug("Val: " + cellValue);
                break;
            case COUNT:
                LOGGER.debug("Val: " + Bytes.toLong(bValue));
                break;
        }

    }
}
 
Developer: gchq, Project: stroom-stats, Lines: 40, Source: StatisticsTestService.java
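To make the timestamp reconstruction concrete with illustrative numbers (not from the source): if a row key covers one hour starting at 10:00:00.000 and columnIntervalSize is 60,000 ms (one minute), a qualifier holding interval number 30 resolves to fullTimestamp = rowKeyPartialTimeMillis + 30 * 60,000, i.e. 10:30:00.000.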

Example 15: filterKeyValue

import org.apache.hadoop.hbase.Cell; // import the required package/class
@Override
public ReturnCode filterKeyValue(Cell v) {
  if (filterOutRow) {
    return ReturnCode.NEXT_ROW;
  }
  return ReturnCode.INCLUDE;
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: RandomRowFilter.java
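For context: filterKeyValue is invoked once per cell, but the decision is per row; RandomRowFilter's filterRowKey (not shown here) sets filterOutRow at random, and this method then keeps or skips every cell of the row accordingly. A minimal usage sketch, assuming a Table handle named table (RandomRowFilter(float) is the filter's real constructor in org.apache.hadoop.hbase.filter):

// Sample roughly half of all rows (sketch; the constructor argument is
// the probability that a row is included).
Scan scan = new Scan();
scan.setFilter(new RandomRowFilter(0.5f));
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result r : scanner) {
    System.out.println(Bytes.toStringBinary(r.getRow()));
  }
}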


Note: The org.apache.hadoop.hbase.Cell class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors, and copyright in the source code remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.