当前位置: 首页>>代码示例>>Java>>正文


Java Scan.setMaxResultSize方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.client.Scan.setMaxResultSize方法的典型用法代码示例。如果您正苦于以下问题:Java Scan.setMaxResultSize方法的具体用法?Java Scan.setMaxResultSize怎么用?Java Scan.setMaxResultSize使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.client.Scan的用法示例。


在下文中一共展示了Scan.setMaxResultSize方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getCellHeapSize

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * @return The approximate heap size of a cell in the test table. All cells should have
 *         approximately the same heap size, so the value is cached to avoid repeating the
 *         calculation
 * @throws Exception
 */
/**
 * Returns the approximate heap size of a cell in the test table. All cells should have
 * approximately the same heap size, so the value is cached to avoid repeating the
 * calculation.
 *
 * @return cached approximate heap size of one cell
 * @throws Exception if the scan against {@code TABLE} fails
 */
private long getCellHeapSize() throws Exception {
  if (CELL_HEAP_SIZE == -1) {
    // Do a partial scan that will return a single result with a single cell
    Scan scan = new Scan();
    scan.setMaxResultSize(1);
    scan.setAllowPartialResults(true);
    // try-with-resources: the original leaked the scanner whenever an assertion failed
    // before reaching scanner.close().
    try (ResultScanner scanner = TABLE.getScanner(scan)) {
      Result result = scanner.next();

      assertTrue(result != null);
      assertTrue(result.rawCells() != null);
      assertTrue(result.rawCells().length == 1);

      CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0]);
    }
  }

  return CELL_HEAP_SIZE;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:27,代码来源:TestServerSideScanMetricsFromClientSide.java

示例2: testHeartbeatBetweenRows

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Test the case that the time limit for the scan is reached after each full row of cells is
 * fetched.
 * @throws Exception
 */
/**
 * Builds the test body for the case where the scan's time limit expires after each full
 * row of cells has been fetched.
 *
 * @return a {@link Callable} that runs the heartbeat-between-rows check
 * @throws Exception never thrown here; declared for caller uniformity
 */
public Callable<Void> testHeartbeatBetweenRows() throws Exception {
  return new Callable<Void>() {

    @Override
    public Void call() throws Exception {
      // Size the scan so the whole table would fit in one RPC; any early server-side
      // stop must then be caused by the time limit, not by result size or caching.
      Scan wideOpenScan = new Scan();
      wideOpenScan.setCaching(Integer.MAX_VALUE);
      wideOpenScan.setMaxResultSize(Long.MAX_VALUE);

      testEquivalenceOfScanWithHeartbeats(wideOpenScan, DEFAULT_ROW_SLEEP_TIME, -1, false);
      return null;
    }
  };
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:22,代码来源:TestScannerHeartbeatMessages.java

示例3: getCellHeapSize

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * @return The approximate heap size of a cell in the test table. All cells should have
 *         approximately the same heap size, so the value is cached to avoid repeating the
 *         calculation
 * @throws Exception
 */
/**
 * Returns the approximate heap size of a cell in the test table. All cells should have
 * approximately the same heap size, so the value is cached to avoid repeating the
 * calculation.
 *
 * @return cached approximate heap size of one cell
 * @throws Exception if the scan against {@code TABLE} fails
 */
private long getCellHeapSize() throws Exception {
  if (CELL_HEAP_SIZE == -1) {
    // Do a partial scan that will return a single result with a single cell
    Scan scan = new Scan();
    scan.setMaxResultSize(1);
    scan.setAllowPartialResults(true);
    // try-with-resources: the original leaked the scanner whenever an assertion failed
    // before reaching scanner.close().
    try (ResultScanner scanner = TABLE.getScanner(scan)) {
      Result result = scanner.next();

      assertTrue(result != null);
      assertTrue(result.rawCells() != null);
      assertTrue(result.rawCells().length == 1);

      CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0]);
      if (LOG.isInfoEnabled()) LOG.info("Cell heap size: " + CELL_HEAP_SIZE);
    }
  }

  return CELL_HEAP_SIZE;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:28,代码来源:TestPartialResultsFromClientSide.java

示例4: testExceptionThrownOnMismatchedPartialResults

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * When reconstructing the complete result from its partials we ensure that the row of each
 * partial result is the same. If one of the rows differs, an exception is thrown.
 */
/**
 * Reconstructing a complete result from partials requires every partial to carry the
 * same row key. Feeding partials from two different rows into
 * {@code Result.createCompleteResult} must therefore throw an {@link IOException}.
 */
@Test
public void testExceptionThrownOnMismatchedPartialResults() throws IOException {
  assertTrue(NUM_ROWS >= 2);

  Scan scan = new Scan();
  scan.setMaxResultSize(Long.MAX_VALUE);
  ResultScanner scanner = TABLE.getScanner(scan);

  // Pull results from two distinct rows and pretend they are partials of one row.
  Result first = scanner.next();
  Result second = scanner.next();
  ArrayList<Result> mismatched = new ArrayList<>();
  mismatched.add(first);
  mismatched.add(second);

  assertFalse(Bytes.equals(first.getRow(), second.getRow()));

  try {
    Result.createCompleteResult(mismatched);
    fail("r1 and r2 are from different rows. It should not be possible to combine them into"
        + " a single result");
  } catch (IOException e) {
    // expected: partials from different rows must not be merged
  }

  scanner.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:29,代码来源:TestPartialResultsFromClientSide.java

示例5: testNoPartialResultsWhenRowFilterPresent

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * When a scan has a filter where {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} is
 * true, the scanner should not return partial results. The scanner cannot return partial results
 * because the entire row needs to be read for the include/exclude decision to be made
 */
/**
 * When a scan has a filter where {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} is
 * true, the scanner should not return partial results. The scanner cannot return partial results
 * because the entire row needs to be read for the include/exclude decision to be made.
 *
 * @throws Exception if the scan fails
 */
@Test
public void testNoPartialResultsWhenRowFilterPresent() throws Exception {
  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  // If a filter hasFilter() is true then partial results should not be returned else filter
  // application server side would break.
  scan.setFilter(new RandomRowFilter(1.0f));
  // try-with-resources: the original leaked the scanner if assertFalse threw mid-loop.
  try (ResultScanner scanner = TABLE.getScanner(scan)) {
    Result r = null;
    while ((r = scanner.next()) != null) {
      assertFalse(r.isPartial());
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:23,代码来源:TestPartialResultsFromClientSide.java

示例6: testReversedCompleteResultWhenRegionMove

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * A reversed scan must keep returning complete (non-partial) rows even when the region is
 * moved between calls to {@code next()}.
 * <p>
 * Fix: the original never closed the scanner or the table, leaking both unconditionally.
 */
@Test
public void testReversedCompleteResultWhenRegionMove() throws IOException {
  try (Table table = createTestTable(TableName.valueOf("testReversedCompleteResultWhenRegionMove"),
      ROWS, FAMILIES, QUALIFIERS, VALUE)) {

    moveRegion(table, 1);

    Scan scan = new Scan();
    scan.setMaxResultSize(1);
    scan.setCaching(1);
    scan.setReversed(true);
    try (ResultScanner scanner = table.getScanner(scan)) {

      // Reversed scan starts from the last row.
      Result result1 = scanner.next();
      assertEquals(NUM_FAMILIES*NUM_QUALIFIERS, result1.rawCells().length);
      Cell c1 = result1.rawCells()[0];
      assertCell(c1, ROWS[NUM_ROWS-1], FAMILIES[0], QUALIFIERS[0]);
      assertFalse(result1.isPartial());

      moveRegion(table, 2);

      // Still complete after the first region move.
      Result result2 = scanner.next();
      assertEquals(NUM_FAMILIES*NUM_QUALIFIERS, result2.rawCells().length);
      Cell c2 = result2.rawCells()[0];
      assertCell(c2, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[0]);
      assertFalse(result2.isPartial());

      moveRegion(table, 3);

      // And after a second move as well.
      Result result3 = scanner.next();
      assertEquals(NUM_FAMILIES*NUM_QUALIFIERS, result3.rawCells().length);
      Cell c3 = result3.rawCells()[0];
      assertCell(c3, ROWS[NUM_ROWS-3], FAMILIES[0], QUALIFIERS[0]);
      assertFalse(result3.isPartial());
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:37,代码来源:TestPartialResultsFromClientSide.java

示例7: testAllowPartialResults

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Ensure that we only see Results marked as partial when the allowPartial flag is set
 * @throws Exception
 */
/**
 * Ensure that we only see Results marked as partial when the allowPartial flag is set.
 *
 * @throws Exception if the scans fail
 */
@Test
public void testAllowPartialResults() throws Exception {
  Scan scan = new Scan();
  scan.setAllowPartialResults(true);
  scan.setMaxResultSize(1);
  // Partials allowed: with a 1-byte size limit the first result is a single-cell partial.
  // try-with-resources: the original leaked the scanner when any assertion failed.
  try (ResultScanner scanner = TABLE.getScanner(scan)) {
    Result result = scanner.next();

    assertTrue(result != null);
    assertTrue(result.isPartial());
    assertTrue(result.rawCells() != null);
    assertTrue(result.rawCells().length == 1);
  }

  // Partials disallowed: the client must stitch the row back together before returning it.
  scan.setAllowPartialResults(false);
  try (ResultScanner scanner = TABLE.getScanner(scan)) {
    Result result = scanner.next();

    assertTrue(result != null);
    assertTrue(!result.isPartial());
    assertTrue(result.rawCells() != null);
    assertTrue(result.rawCells().length == NUM_COLS);
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:31,代码来源:TestPartialResultsFromClientSide.java

示例8: testEquivalenceOfScanResults

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Ensure that the results returned from a scanner that retrieves all results in a single RPC call
 * matches the results that are returned from a scanner that must incrementally combine partial
 * results into complete results. A variety of scan configurations can be tested
 * @throws Exception
 */
/**
 * Results fetched by a scanner that retrieves everything in a single RPC must match the
 * results produced by a scanner forced to incrementally stitch partials into complete rows.
 *
 * @throws Exception if either scan fails
 */
@Test
public void testEquivalenceOfScanResults() throws Exception {
  // Large enough to pull the whole table back in one shot.
  Scan singleRpcScan = new Scan();
  singleRpcScan.setMaxResultSize(Long.MAX_VALUE);

  // Identical scan, but throttled to trickle back the smallest possible chunks.
  Scan trickleScan = new Scan(singleRpcScan);
  trickleScan.setMaxResultSize(1);

  testEquivalenceOfScanResults(TABLE, singleRpcScan, trickleScan);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:17,代码来源:TestPartialResultsFromClientSide.java

示例9: testOrderingOfCellsInPartialResults

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Order of cells in partial results matches the ordering of cells from complete results
 * @throws Exception
 */
/**
 * Cell ordering within partial results must match the ordering of cells in complete results,
 * for every chunk size from one cell up to a full row.
 *
 * @throws Exception if any scan fails
 */
@Test
public void testOrderingOfCellsInPartialResults() throws Exception {
  Scan scan = new Scan();

  for (int cellsPerChunk = 1; cellsPerChunk <= NUM_COLS; cellsPerChunk++) {
    scan.setMaxResultSize(getResultSizeForNumberOfCells(cellsPerChunk));
    testOrderingOfCellsInPartialResults(scan);

    // Repeat the same size limit with a reversed scanner.
    // NOTE(review): reversed stays true for every later iteration, so only the very first
    // pass of the loop exercises a forward scan — confirm this is intended.
    scan.setReversed(true);
    testOrderingOfCellsInPartialResults(scan);
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:18,代码来源:TestPartialResultsFromClientSide.java

示例10: testExpectedNumberOfCellsPerPartialResult

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Verifies that each partial result returned under a maxResultSize sized for
 * {@code expectedNumberOfCells} cells carries at most that many cells.
 *
 * @param baseScan scan settings to start from; a copy is taken and never mutated
 * @param expectedNumberOfCells the cell-count budget implied by the size limit
 * @throws Exception if the scan fails
 */
public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan, int expectedNumberOfCells)
    throws Exception {

  if (LOG.isInfoEnabled()) LOG.info("groupSize:" + expectedNumberOfCells);

  // Use the cellHeapSize to set maxResultSize such that we know how many cells to expect back
  // from the call. The returned results should NOT exceed expectedNumberOfCells but may be less
  // than it in cases where expectedNumberOfCells is not an exact multiple of the number of
  // columns in the table.
  Scan sizedScan = new Scan(baseScan);
  sizedScan.setAllowPartialResults(true);
  sizedScan.setMaxResultSize(getResultSizeForNumberOfCells(expectedNumberOfCells));

  ResultScanner scanner = TABLE.getScanner(sizedScan);
  byte[] previousRow = null;
  Result current = null;
  while ((current = scanner.next()) != null) {
    assertTrue(current.rawCells() != null);

    // Cases when cell count won't equal expectedNumberOfCells:
    // 1. Returned result is the final result needed to form the complete result for that row
    // 2. It is the first result we have seen for that row and thus may have been fetched as
    // the last group of cells that fit inside the maxResultSize
    boolean exactBudget = current.rawCells().length == expectedNumberOfCells;
    boolean rowComplete = !current.isPartial();
    boolean newRow = !Bytes.equals(previousRow, current.getRow());
    assertTrue(
        "Result's cell count differed from expected number. result: " + current,
        exactBudget || rowComplete || newRow);
    previousRow = current.getRow();
  }

  scanner.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:33,代码来源:TestPartialResultsFromClientSide.java

示例11: testPartialResultsAndBatch

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Verifies the interaction between scan batching and partial results: a partial result may
 * carry at most {@code min(batch, cellsPerPartialResult)} cells, a complete one at most
 * {@code batch}.
 *
 * @param batch the scan batch size (cells per Result)
 * @param cellsPerPartialResult cell budget used to derive maxResultSize
 * @throws Exception if the scan fails
 */
public void testPartialResultsAndBatch(final int batch, final int cellsPerPartialResult)
    throws Exception {
  if (LOG.isInfoEnabled()) {
    LOG.info("batch: " + batch + " cellsPerPartialResult: " + cellsPerPartialResult);
  }

  Scan scan = new Scan();
  scan.setMaxResultSize(getResultSizeForNumberOfCells(cellsPerPartialResult));
  scan.setBatch(batch);
  // try-with-resources also fixes a leak when an assertion failed mid-loop.
  try (ResultScanner scanner = TABLE.getScanner(scan)) {
    Result result = null;
    int repCount = 0;

    // BUG FIX: the original called scanner.next() once before this loop and threw the
    // first Result away, so the first batch was never validated.
    while ((result = scanner.next()) != null) {
      assertTrue(result.rawCells() != null);

      if (result.isPartial()) {
        final String error =
            "Cells:" + result.rawCells().length + " Batch size:" + batch
                + " cellsPerPartialResult:" + cellsPerPartialResult + " rep:" + repCount;
        assertTrue(error, result.rawCells().length <= Math.min(batch, cellsPerPartialResult));
      } else {
        assertTrue(result.rawCells().length <= batch);
      }
      repCount++;
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:30,代码来源:TestPartialResultsFromClientSide.java

示例12: testPartialResultsReassembly

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * For each row, collects the stream of partial results from a size-limited scanner,
 * reassembles them via {@code Result.createCompleteResult}, and checks the reassembled
 * row equals the row returned by an unlimited (one-shot) scanner over the same scan.
 *
 * @param scanBase base scan settings; copied for both scanners, never mutated
 * @throws Exception if either scan or the comparison fails
 */
public void testPartialResultsReassembly(Scan scanBase) throws Exception {
  // Scanner forced to return the smallest possible partial chunks.
  Scan partialScan = new Scan(scanBase);
  partialScan.setMaxResultSize(1);
  partialScan.setAllowPartialResults(true);
  ResultScanner partialScanner = TABLE.getScanner(partialScan);

  // Reference scanner: size limit large enough to return each row whole.
  Scan oneShotScan = new Scan(scanBase);
  oneShotScan.setMaxResultSize(Long.MAX_VALUE);
  ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan);

  ArrayList<Result> partials = new ArrayList<>();
  for (int i = 0; i < NUM_ROWS; i++) {
    Result partialResult = null;
    Result completeResult = null;
    Result oneShotResult = null;
    partials.clear();

    // Accumulate partials until we see the final (non-partial) piece of the row.
    // The add happens before the check, so the terminating non-partial Result is
    // included in the list handed to createCompleteResult.
    do {
      partialResult = partialScanner.next();
      partials.add(partialResult);
    } while (partialResult != null && partialResult.isPartial());

    completeResult = Result.createCompleteResult(partials);
    oneShotResult = oneShotScanner.next();

    compareResults(completeResult, oneShotResult, null);
  }

  // Both scanners must be exhausted after NUM_ROWS rows.
  assertTrue(oneShotScanner.next() == null);
  assertTrue(partialScanner.next() == null);

  oneShotScanner.close();
  partialScanner.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:35,代码来源:TestPartialResultsFromClientSide.java

示例13: testPartialResultsAndCaching

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * @param resultSizeRowLimit The row limit that will be enforced through maxResultSize
 * @param cachingRowLimit The row limit that will be enforced through caching
 * @throws Exception
 */
/**
 * @param resultSizeRowLimit The row limit that will be enforced through maxResultSize
 * @param cachingRowLimit The row limit that will be enforced through caching
 * @throws Exception if the scan fails
 */
public void testPartialResultsAndCaching(int resultSizeRowLimit, int cachingRowLimit)
    throws Exception {
  Scan scan = new Scan();
  scan.setAllowPartialResults(true);

  // Offset the cell count handed to getResultSizeForNumberOfCells so that the size limit
  // is never an exact multiple of a row's cell count. That way partial results appear
  // whenever the size limit trips before the caching limit does.
  int extraCells = NUM_COLS / 3;
  long sizeLimit = getResultSizeForNumberOfCells(resultSizeRowLimit * NUM_COLS + extraCells);
  scan.setMaxResultSize(sizeLimit);
  scan.setCaching(cachingRowLimit);

  ResultScanner scanner = TABLE.getScanner(scan);
  ClientScanner clientScanner = (ClientScanner) scanner;

  // If the size-derived row limit is tighter than the caching limit, the max result size
  // is hit first and partials may legitimately be observed; otherwise none should appear.
  boolean partialsPermitted = resultSizeRowLimit < cachingRowLimit;
  Result r = null;
  while ((r = clientScanner.next()) != null) {
    assertTrue(!r.isPartial() || partialsPermitted);
  }

  scanner.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:34,代码来源:TestPartialResultsFromClientSide.java

示例14: testSmallScansDoNotAllowPartials

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Even with partials explicitly allowed and the tightest possible size limit, a scan
 * marked small must only ever hand back complete rows.
 *
 * @param baseScan scan settings to start from; copied, never mutated
 * @throws Exception if the scan fails
 */
public void testSmallScansDoNotAllowPartials(Scan baseScan) throws Exception {
  Scan smallScan = new Scan(baseScan);
  smallScan.setAllowPartialResults(true);
  smallScan.setSmall(true);
  smallScan.setMaxResultSize(1);

  ResultScanner scanner = TABLE.getScanner(smallScan);

  for (Result r = scanner.next(); r != null; r = scanner.next()) {
    assertFalse(r.isPartial());
  }

  scanner.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:16,代码来源:TestPartialResultsFromClientSide.java

示例15: testPartialResultsWithColumnFilter

import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * With a column filter that does not filter on whole rows, chunked (partial-capable)
 * scans must produce the same results as an unlimited one-shot scan, for every chunk
 * size from one cell up to a full row.
 *
 * @param filter a filter for which {@code hasFilterRow()} is false
 * @throws Exception if any scan fails
 */
public void testPartialResultsWithColumnFilter(Filter filter) throws Exception {
  assertTrue(!filter.hasFilterRow());

  Scan chunkedScan = new Scan();
  chunkedScan.setFilter(filter);

  Scan unboundedScan = new Scan();
  unboundedScan.setFilter(filter);
  unboundedScan.setMaxResultSize(Long.MAX_VALUE);

  // Sweep every chunk size and compare against the single-RPC reference scan.
  for (int cells = 1; cells <= NUM_COLS; cells++) {
    chunkedScan.setMaxResultSize(getResultSizeForNumberOfCells(cells));
    testEquivalenceOfScanResults(TABLE, chunkedScan, unboundedScan);
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:16,代码来源:TestPartialResultsFromClientSide.java


注:本文中的org.apache.hadoop.hbase.client.Scan.setMaxResultSize方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。