

Java Scan.setAllowPartialResults Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.setAllowPartialResults. If you are wondering what Scan.setAllowPartialResults does, how to call it, or where to find it used in practice, the curated method code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Scan.


The sections below present 12 code examples of the Scan.setAllowPartialResults method, sorted by popularity by default.
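Before the collected examples, here is a minimal, self-contained sketch of the pattern these tests exercise: enabling partial results so a wide row may be returned in several pieces, then stitching the pieces back together on the client with Result.createCompleteResult. This is an illustrative sketch, not one of the examples below; the connection settings and the table name my_table are placeholders.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AllowPartialResultsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf("my_table"))) { // placeholder table

      Scan scan = new Scan();
      scan.setAllowPartialResults(true); // a single row may now arrive split across several Results
      scan.setMaxResultSize(1024);       // small size limit so wide rows come back in pieces

      try (ResultScanner scanner = table.getScanner(scan)) {
        List<Result> partials = new ArrayList<>();
        Result result;
        while ((result = scanner.next()) != null) {
          partials.add(result);
          if (!result.isPartial()) {
            // Last piece of the current row: reassemble the partials into one complete Result.
            Result whole = Result.createCompleteResult(partials);
            System.out.println("row=" + Bytes.toString(whole.getRow())
                + " cells=" + whole.rawCells().length);
            partials.clear();
          }
        }
      }
    }
  }
}

On 1.x clients, Result.isPartial() indicates that more cells for the same row may still follow; newer client versions expose the same information through mayHaveMoreCellsInRow().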

Example 1: getCellHeapSize

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * @return The approximate heap size of a cell in the test table. All cells should have
 *         approximately the same heap size, so the value is cached to avoid repeating the
 *         calculation
 * @throws Exception
 */
private long getCellHeapSize() throws Exception {
  if (CELL_HEAP_SIZE == -1) {
    // Do a partial scan that will return a single result with a single cell
    Scan scan = new Scan();
    scan.setMaxResultSize(1);
    scan.setAllowPartialResults(true);
    ResultScanner scanner = TABLE.getScanner(scan);

    Result result = scanner.next();

    assertTrue(result != null);
    assertTrue(result.rawCells() != null);
    assertTrue(result.rawCells().length == 1);

    CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0]);
    scanner.close();
  }

  return CELL_HEAP_SIZE;
}
 
Author: fengchen8086, Project: ditb, Lines: 27, Source: TestServerSideScanMetricsFromClientSide.java

Example 2: getCellHeapSize

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * @return The approximate heap size of a cell in the test table. All cells should have
 *         approximately the same heap size, so the value is cached to avoid repeating the
 *         calculation
 * @throws Exception
 */
private long getCellHeapSize() throws Exception {
  if (CELL_HEAP_SIZE == -1) {
    // Do a partial scan that will return a single result with a single cell
    Scan scan = new Scan();
    scan.setMaxResultSize(1);
    scan.setAllowPartialResults(true);
    ResultScanner scanner = TABLE.getScanner(scan);

    Result result = scanner.next();

    assertTrue(result != null);
    assertTrue(result.rawCells() != null);
    assertTrue(result.rawCells().length == 1);

    CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0]);
    if (LOG.isInfoEnabled()) LOG.info("Cell heap size: " + CELL_HEAP_SIZE);
    scanner.close();
  }

  return CELL_HEAP_SIZE;
}
 
Author: fengchen8086, Project: ditb, Lines: 28, Source: TestPartialResultsFromClientSide.java

Example 3: testNoPartialResultsWhenRowFilterPresent

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * When a scan has a filter where {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} is
 * true, the scanner should not return partial results. The scanner cannot return partial results
 * because the entire row needs to be read for the include/exclude decision to be made
 */
@Test
public void testNoPartialResultsWhenRowFilterPresent() throws Exception {
  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  // If a filter's hasFilterRow() is true then partial results should not be returned, else
  // server-side filter application would break.
  scan.setFilter(new RandomRowFilter(1.0f));
  ResultScanner scanner = TABLE.getScanner(scan);

  Result r = null;
  while ((r = scanner.next()) != null) {
    assertFalse(r.isPartial());
  }

  scanner.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 23, Source: TestPartialResultsFromClientSide.java

Example 4: testAllowPartialResults

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * Ensure that we only see Results marked as partial when the allowPartial flag is set
 * @throws Exception
 */
@Test
public void testAllowPartialResults() throws Exception {
  Scan scan = new Scan();
  scan.setAllowPartialResults(true);
  scan.setMaxResultSize(1);
  ResultScanner scanner = TABLE.getScanner(scan);
  Result result = scanner.next();

  assertTrue(result != null);
  assertTrue(result.isPartial());
  assertTrue(result.rawCells() != null);
  assertTrue(result.rawCells().length == 1);

  scanner.close();

  scan.setAllowPartialResults(false);
  scanner = TABLE.getScanner(scan);
  result = scanner.next();

  assertTrue(result != null);
  assertTrue(!result.isPartial());
  assertTrue(result.rawCells() != null);
  assertTrue(result.rawCells().length == NUM_COLS);

  scanner.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 31, Source: TestPartialResultsFromClientSide.java

Example 5: testExpectedNumberOfCellsPerPartialResult

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan, int expectedNumberOfCells)
    throws Exception {

  if (LOG.isInfoEnabled()) LOG.info("groupSize:" + expectedNumberOfCells);

  // Use the cellHeapSize to set maxResultSize such that we know how many cells to expect back
  // from the call. The returned results should NOT exceed expectedNumberOfCells but may be less
  // than it in cases where expectedNumberOfCells is not an exact multiple of the number of
  // columns in the table.
  Scan scan = new Scan(baseScan);
  scan.setAllowPartialResults(true);
  scan.setMaxResultSize(getResultSizeForNumberOfCells(expectedNumberOfCells));

  ResultScanner scanner = TABLE.getScanner(scan);
  Result result = null;
  byte[] prevRow = null;
  while ((result = scanner.next()) != null) {
    assertTrue(result.rawCells() != null);

    // Cases when cell count won't equal expectedNumberOfCells:
    // 1. Returned result is the final result needed to form the complete result for that row
    // 2. It is the first result we have seen for that row and thus may have been fetched as
    // the last group of cells that fit inside the maxResultSize
    assertTrue(
        "Result's cell count differed from expected number. result: " + result,
        result.rawCells().length == expectedNumberOfCells || !result.isPartial()
            || !Bytes.equals(prevRow, result.getRow()));
    prevRow = result.getRow();
  }

  scanner.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 33, Source: TestPartialResultsFromClientSide.java

Example 6: testPartialResultsReassembly

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
public void testPartialResultsReassembly(Scan scanBase) throws Exception {
  Scan partialScan = new Scan(scanBase);
  partialScan.setMaxResultSize(1);
  partialScan.setAllowPartialResults(true);
  ResultScanner partialScanner = TABLE.getScanner(partialScan);

  Scan oneShotScan = new Scan(scanBase);
  oneShotScan.setMaxResultSize(Long.MAX_VALUE);
  ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan);

  ArrayList<Result> partials = new ArrayList<>();
  for (int i = 0; i < NUM_ROWS; i++) {
    Result partialResult = null;
    Result completeResult = null;
    Result oneShotResult = null;
    partials.clear();

    do {
      partialResult = partialScanner.next();
      partials.add(partialResult);
    } while (partialResult != null && partialResult.isPartial());

    completeResult = Result.createCompleteResult(partials);
    oneShotResult = oneShotScanner.next();

    compareResults(completeResult, oneShotResult, null);
  }

  assertTrue(oneShotScanner.next() == null);
  assertTrue(partialScanner.next() == null);

  oneShotScanner.close();
  partialScanner.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 35, Source: TestPartialResultsFromClientSide.java

Example 7: testPartialResultsAndCaching

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * @param resultSizeRowLimit The row limit that will be enforced through maxResultSize
 * @param cachingRowLimit The row limit that will be enforced through caching
 * @throws Exception
 */
public void testPartialResultsAndCaching(int resultSizeRowLimit, int cachingRowLimit)
    throws Exception {
  Scan scan = new Scan();
  scan.setAllowPartialResults(true);

  // The number of cells specified in the call to getResultSizeForNumberOfCells is offset to
  // ensure that the result size we specify is not an exact multiple of the number of cells
  // in a row. This ensures that partial results will be returned when the result size limit
  // is reached before the caching limit.
  int cellOffset = NUM_COLS / 3;
  long maxResultSize = getResultSizeForNumberOfCells(resultSizeRowLimit * NUM_COLS + cellOffset);
  scan.setMaxResultSize(maxResultSize);
  scan.setCaching(cachingRowLimit);

  ResultScanner scanner = TABLE.getScanner(scan);
  ClientScanner clientScanner = (ClientScanner) scanner;
  Result r = null;

  // Approximate the number of rows we expect will fit into the specified max result size. If this
  // approximation is less than caching, then we expect that the max result size limit will be
  // hit before the caching limit and thus partial results may be seen
  boolean expectToSeePartialResults = resultSizeRowLimit < cachingRowLimit;
  while ((r = clientScanner.next()) != null) {
    assertTrue(!r.isPartial() || expectToSeePartialResults);
  }

  scanner.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 34, Source: TestPartialResultsFromClientSide.java

Example 8: testSmallScansDoNotAllowPartials

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
public void testSmallScansDoNotAllowPartials(Scan baseScan) throws Exception {
  Scan scan = new Scan(baseScan);
  scan.setAllowPartialResults(true);
  scan.setSmall(true);
  scan.setMaxResultSize(1);

  ResultScanner scanner = TABLE.getScanner(scan);
  Result r = null;

  while ((r = scanner.next()) != null) {
    assertFalse(r.isPartial());
  }

  scanner.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 16, Source: TestPartialResultsFromClientSide.java

Example 9: testPartialResultWhenRegionMove

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
@Test
public void testPartialResultWhenRegionMove() throws IOException {
  Table table = createTestTable(TableName.valueOf("testPartialResultWhenRegionMove"),
      ROWS, FAMILIES, QUALIFIERS, VALUE);

  moveRegion(table, 1);

  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  ResultScanner scanner = table.getScanner(scan);
  for (int i = 0; i < NUM_FAMILIES * NUM_QUALIFIERS - 1; i++) {
    scanner.next();
  }
  Result result1 = scanner.next();
  assertEquals(1, result1.rawCells().length);
  Cell c1 = result1.rawCells()[0];
  assertCell(c1, ROWS[0], FAMILIES[NUM_FAMILIES - 1], QUALIFIERS[NUM_QUALIFIERS - 1]);
  assertFalse(result1.isPartial());

  moveRegion(table, 2);

  Result result2 = scanner.next();
  assertEquals(1, result2.rawCells().length);
  Cell c2 = result2.rawCells()[0];
  assertCell(c2, ROWS[1], FAMILIES[0], QUALIFIERS[0]);
  assertTrue(result2.isPartial());

  moveRegion(table, 3);

  Result result3 = scanner.next();
  assertEquals(1, result3.rawCells().length);
  Cell c3 = result3.rawCells()[0];
  assertCell(c3, ROWS[1], FAMILIES[0], QUALIFIERS[1]);
  assertTrue(result3.isPartial());

}
 
Author: fengchen8086, Project: ditb, Lines: 38, Source: TestPartialResultsFromClientSide.java

Example 10: testReversedPartialResultWhenRegionMove

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
@Test
public void testReversedPartialResultWhenRegionMove() throws IOException {
  Table table = createTestTable(TableName.valueOf("testReversedPartialResultWhenRegionMove"),
      ROWS, FAMILIES, QUALIFIERS, VALUE);

  moveRegion(table, 1);

  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  scan.setReversed(true);
  ResultScanner scanner = table.getScanner(scan);
  for (int i = 0; i < NUM_FAMILIES * NUM_QUALIFIERS - 1; i++) {
    scanner.next();
  }
  Result result1 = scanner.next();
  assertEquals(1, result1.rawCells().length);
  Cell c1 = result1.rawCells()[0];
  assertCell(c1, ROWS[NUM_ROWS - 1], FAMILIES[NUM_FAMILIES - 1], QUALIFIERS[NUM_QUALIFIERS - 1]);
  assertFalse(result1.isPartial());

  moveRegion(table, 2);

  Result result2 = scanner.next();
  assertEquals(1, result2.rawCells().length);
  Cell c2 = result2.rawCells()[0];
  assertCell(c2, ROWS[NUM_ROWS - 2], FAMILIES[0], QUALIFIERS[0]);
  assertTrue(result2.isPartial());

  moveRegion(table, 3);

  Result result3 = scanner.next();
  assertEquals(1, result3.rawCells().length);
  Cell c3 = result3.rawCells()[0];
  assertCell(c3, ROWS[NUM_ROWS - 2], FAMILIES[0], QUALIFIERS[1]);
  assertTrue(result3.isPartial());

}
 
Author: fengchen8086, Project: ditb, Lines: 39, Source: TestPartialResultsFromClientSide.java

Example 11: testOrderingOfCellsInPartialResults

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
public void testOrderingOfCellsInPartialResults(final Scan basePartialScan) throws Exception {
  // Scan that retrieves results in pieces (partials). By setting allowPartialResults to be true
  // the results will NOT be reconstructed and instead the caller will see the partial results
  // returned by the server
  Scan partialScan = new Scan(basePartialScan);
  partialScan.setAllowPartialResults(true);
  ResultScanner partialScanner = TABLE.getScanner(partialScan);

  // Scan that retrieves all table results in single RPC request
  Scan oneShotScan = new Scan(basePartialScan);
  oneShotScan.setMaxResultSize(Long.MAX_VALUE);
  oneShotScan.setCaching(ROWS.length);
  ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan);

  Result oneShotResult = oneShotScanner.next();
  Result partialResult = null;
  int iterationCount = 0;

  while (oneShotResult != null && oneShotResult.rawCells() != null) {
    List<Cell> aggregatePartialCells = new ArrayList<Cell>();
    do {
      partialResult = partialScanner.next();
      assertTrue("Partial Result is null. iteration: " + iterationCount, partialResult != null);
      assertTrue("Partial cells are null. iteration: " + iterationCount,
          partialResult.rawCells() != null);

      for (Cell c : partialResult.rawCells()) {
        aggregatePartialCells.add(c);
      }
    } while (partialResult.isPartial());

    assertTrue("Number of cells differs. iteration: " + iterationCount,
        oneShotResult.rawCells().length == aggregatePartialCells.size());
    final Cell[] oneShotCells = oneShotResult.rawCells();
    for (int cell = 0; cell < oneShotCells.length; cell++) {
      Cell oneShotCell = oneShotCells[cell];
      Cell partialCell = aggregatePartialCells.get(cell);

      assertTrue("One shot cell was null", oneShotCell != null);
      assertTrue("Partial cell was null", partialCell != null);
      assertTrue("Cell differs. oneShotCell:" + oneShotCell + " partialCell:" + partialCell,
          oneShotCell.equals(partialCell));
    }

    oneShotResult = oneShotScanner.next();
    iterationCount++;
  }

  assertTrue(partialScanner.next() == null);

  partialScanner.close();
  oneShotScanner.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 54, Source: TestPartialResultsFromClientSide.java

Example 12: testReadPointAndPartialResults

import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
@Test
public void testReadPointAndPartialResults() throws Exception {
  TableName testName = TableName.valueOf("testReadPointAndPartialResults");
  int numRows = 5;
  int numFamilies = 5;
  int numQualifiers = 5;
  byte[][] rows = HTestConst.makeNAscii(Bytes.toBytes("testRow"), numRows);
  byte[][] families = HTestConst.makeNAscii(Bytes.toBytes("testFamily"), numFamilies);
  byte[][] qualifiers = HTestConst.makeNAscii(Bytes.toBytes("testQualifier"), numQualifiers);
  byte[] value = Bytes.createMaxByteArray(100);

  Table tmpTable = createTestTable(testName, rows, families, qualifiers, value);

  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);

  // Open scanner before deletes
  ResultScanner scanner = tmpTable.getScanner(scan);

  Delete delete1 = new Delete(rows[0]);
  delete1.addColumn(families[0], qualifiers[0], 0);
  tmpTable.delete(delete1);

  Delete delete2 = new Delete(rows[1]);
  delete2.addColumn(families[1], qualifiers[1], 1);
  tmpTable.delete(delete2);

  // Should see all cells because scanner was opened prior to deletes
  int scannerCount = countCellsFromScanner(scanner);
  int expectedCount = numRows * numFamilies * numQualifiers;
  assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
      scannerCount == expectedCount);

  // Minus 2 for the two cells that were deleted
  scanner = tmpTable.getScanner(scan);
  scannerCount = countCellsFromScanner(scanner);
  expectedCount = numRows * numFamilies * numQualifiers - 2;
  assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
      scannerCount == expectedCount);

  scanner = tmpTable.getScanner(scan);
  // Put in 2 new rows. The timestamps differ from the deleted rows
  Put put1 = new Put(rows[0]);
  put1.add(new KeyValue(rows[0], families[0], qualifiers[0], 1, value));
  tmpTable.put(put1);

  Put put2 = new Put(rows[1]);
  put2.add(new KeyValue(rows[1], families[1], qualifiers[1], 2, value));
  tmpTable.put(put2);

  // Scanner opened prior to puts. Cell count shouldn't have changed
  scannerCount = countCellsFromScanner(scanner);
  expectedCount = numRows * numFamilies * numQualifiers - 2;
  assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
      scannerCount == expectedCount);

  // Now the scanner should see the cells that were added by puts
  scanner = tmpTable.getScanner(scan);
  scannerCount = countCellsFromScanner(scanner);
  expectedCount = numRows * numFamilies * numQualifiers;
  assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
      scannerCount == expectedCount);

  TEST_UTIL.deleteTable(testName);
}
 
Author: fengchen8086, Project: ditb, Lines: 67, Source: TestPartialResultsFromClientSide.java


Note: The org.apache.hadoop.hbase.client.Scan.setAllowPartialResults method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not republish without permission.