

Java HRegion.getScanner Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.HRegion.getScanner, drawn from open-source projects. If you have been wondering what HRegion.getScanner does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.HRegion.


Ten code examples of the HRegion.getScanner method are shown below, ordered by popularity.
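For orientation, every example below follows the same basic pattern: build a Scan, open a scanner with HRegion.getScanner(Scan), drain it with next(List<Cell>), and close it. A minimal sketch of that pattern (assuming the HBase 1.x-era API these examples target, with the region opened elsewhere):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

// Sketch only: "region" is assumed to be an HRegion opened elsewhere.
public static void scanAllRows(HRegion region) throws IOException {
  Scan scan = new Scan();
  InternalScanner scanner = region.getScanner(scan);
  try {
    List<Cell> cells = new ArrayList<Cell>();
    boolean moreRows;
    do {
      moreRows = scanner.next(cells); // fills "cells" with one row's cells
      // process "cells" here before clearing it for the next row
      cells.clear();
    } while (moreRows);
  } finally {
    scanner.close();
  }
}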

Example 1: doScan

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
public static void doScan(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  InternalScanner scanner = null;
  try {
    // Read uncommitted data so the scan is not blocked by in-flight mutations.
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result); // fetches only the first batch of cells
  } finally {
    if (scanner != null) scanner.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Source: TestRowProcessorEndpoint.java
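Since InternalScanner extends Closeable in this generation of HBase, the body of doScan can also be written with try-with-resources. A sketch of the equivalent form (not part of the original snippet; assumes the same region, scan, and result variables):

try (InternalScanner scanner = region.getScanner(scan)) {
  result.clear();
  scanner.next(result); // as above, only the first batch of cells is fetched
}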

Example 2: verifyMerge

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
private void verifyMerge(final HRegion merged, final int upperbound)
    throws IOException {
  // First, scan the merged region end to end to confirm it is readable.
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  InternalScanner scanner = merged.getScanner(scan);
  try {
    List<Cell> testRes = null;
    while (true) {
      testRes = new ArrayList<Cell>();
      boolean hasNext = scanner.next(testRes);
      if (!hasNext) {
        break;
      }
    }
  } finally {
    scanner.close();
  }

  // Then verify each expected row individually with a Get.
  for (int i = 0; i < upperbound; i++) {
    for (int j = 0; j < rows[i].length; j++) {
      Get get = new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result = merged.get(get);
      assertEquals(1, result.size());
      byte [] bytes = CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(Bytes.toStringBinary(rows[i][j]), bytes);
      assertTrue(Bytes.equals(bytes, rows[i][j]));
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Source: TestMergeTool.java

Example 3: runScanner

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
private void runScanner(Table hTable, int expectedSize, Filter filter) throws IOException {
  String cf = "f";
  Scan scan = new Scan();
  scan.addFamily(cf.getBytes());
  scan.setFilter(filter);
  List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(table.getBytes());
  HRegion first = regions.get(0);
  // Open a single region-level scanner for the filtered scan.
  RegionScanner scanner = first.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  long timeBeforeScan = System.currentTimeMillis();
  int found = 0;
  while (scanner.next(results)) {
    found += results.size();
    results.clear();
  }
  found += results.size(); // pick up the final partial batch
  long scanTime = System.currentTimeMillis() - timeBeforeScan;
  scanner.close();

  LOG.info("\nscan time = " + scanTime + "ms");
  LOG.info("found " + found + " results\n");

  assertEquals(expectedSize, found);
}
 
Developer: fengchen8086, Project: ditb, Source: TestFuzzyRowFilterEndToEnd.java

Example 4: testScannerSelection

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flush(true);
  }

  // The scan range ["aaa", "aaz") lies entirely before the written rows
  // ("row0", "row1", ...), so the scan returns nothing; what the test checks
  // is which store files get touched, observed through the block cache.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig.blockCacheDisabled = false;
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(expectedCount, accessedFiles.size());
  region.close();
}
 
Developer: fengchen8086, Project: ditb, Source: TestScannerSelectionUsingKeyRange.java

Example 5: testScanLimitAndOffset

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
/**
 * Test, from the client side, of a scan with rowOffsetPerColumnFamily and
 * maxResultsPerColumnFamily set.
 *
 * @throws Exception
 */
@Test
public void testScanLimitAndOffset() throws Exception {
  byte [][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2);
  byte [][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3);
  byte [][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(HTestConst.DEFAULT_TABLE_BYTES));
  HRegionInfo info = new HRegionInfo(HTestConst.DEFAULT_TABLE, null, null, false);
  for (byte[] family : FAMILIES) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.addFamily(hcd);
  }
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
  try {
    Put put;
    Scan scan;
    Result result;
    boolean toLog = true;

    List<Cell> kvListExp = new ArrayList<Cell>();

    int storeOffset = 1;
    int storeLimit = 3;
    for (int r = 0; r < ROWS.length; r++) {
      put = new Put(ROWS[r]);
      for (int c = 0; c < FAMILIES.length; c++) {
        for (int q = 0; q < QUALIFIERS.length; q++) {
          KeyValue kv = new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1,
              HTestConst.DEFAULT_VALUE_BYTES);
          put.add(kv);
          if (storeOffset <= q && q < storeOffset + storeLimit) {
            kvListExp.add(kv);
          }
        }
      }
      region.put(put);
    }

    scan = new Scan();
    scan.setRowOffsetPerColumnFamily(storeOffset);
    scan.setMaxResultsPerColumnFamily(storeLimit);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> kvListScan = new ArrayList<Cell>();
    List<Cell> results = new ArrayList<Cell>();
    while (scanner.next(results) || !results.isEmpty()) {
      kvListScan.addAll(results);
      results.clear();
    }
    result = Result.create(kvListScan);
    TestScannersFromClientSide.verifyResult(result, kvListExp, toLog,
        "Testing scan with storeOffset and storeLimit");
  } finally {
    region.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Source: TestIntraRowPagination.java
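A detail worth noting in this example: the loop condition scanner.next(results) || !results.isEmpty() keeps iterating until next reports no more rows and the final batch has been consumed, so the last row is not silently dropped. Compare Example 3, which uses the simpler while (scanner.next(results)) form and adds the final results.size() after the loop instead.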

Example 6: testFilterListWithPrefixFilter

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
@Test
public void testFilterListWithPrefixFilter() throws IOException {
  byte[] family = Bytes.toBytes("f1");
  byte[] qualifier = Bytes.toBytes("q1");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestFilter"));
  htd.addFamily(new HColumnDescriptor(family));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion testRegion = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);

  for(int i=0; i<5; i++) {
    Put p = new Put(Bytes.toBytes((char)('a'+i) + "row"));
    p.setDurability(Durability.SKIP_WAL);
    p.add(family, qualifier, Bytes.toBytes(String.valueOf(111+i)));
    testRegion.put(p);
  }
  testRegion.flush(true);

  // rows starting with "b"
  PrefixFilter pf = new PrefixFilter(new byte[] {'b'});
  // rows with value of column 'q1' set to '113'
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(
      family, qualifier, CompareOp.EQUAL, Bytes.toBytes("113"));
  // combine these two with OR in a FilterList
  FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, pf, scvf);

  Scan s1 = new Scan();
  s1.setFilter(filterList);
  InternalScanner scanner = testRegion.getScanner(s1);
  List<Cell> results = new ArrayList<Cell>();
  int resultCount = 0;
  while (scanner.next(results)) {
    resultCount++;
    byte[] row =  CellUtil.cloneRow(results.get(0));
    LOG.debug("Found row: " + Bytes.toStringBinary(row));
    assertTrue(Bytes.equals(row, Bytes.toBytes("brow"))
        || Bytes.equals(row, Bytes.toBytes("crow")));
    results.clear();
  }
  assertEquals(2, resultCount);
  scanner.close();

  WAL wal = testRegion.getWAL();
  testRegion.close();
  wal.close();
}
 
Developer: fengchen8086, Project: ditb, Source: TestFilter.java
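For contrast: with the data written above, row "brow" holds value "112" and only "crow" holds "113", so no single row passes both filters, and the AND combinator would return nothing. A one-line sketch reusing pf and scvf from the example:

// MUST_PASS_ALL is a logical AND: a row must start with 'b' AND have q1 == "113".
// With the data above no row satisfies both, so a scan with this list returns no rows.
FilterList andList = new FilterList(Operator.MUST_PASS_ALL, pf, scvf);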

Example 7: testMultipleColumnPrefixFilter

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
@Test
public void testMultipleColumnPrefixFilter() throws IOException {
  String family = "Family";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestMultipleColumnPrefixFilter"));
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setMaxVersions(3);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.
    getDataTestDir(), TEST_UTIL.getConfiguration(), htd);

  List<String> rows = generateRandomWords(100, "row");
  List<String> columns = generateRandomWords(10000, "column");
  long maxTimestamp = 2;

  List<Cell> kvList = new ArrayList<Cell>();

  Map<String, List<Cell>> prefixMap = new HashMap<String,
      List<Cell>>();

  prefixMap.put("p", new ArrayList<Cell>());
  prefixMap.put("q", new ArrayList<Cell>());
  prefixMap.put("s", new ArrayList<Cell>());

  String valueString = "ValueString";

  for (String row: rows) {
    Put p = new Put(Bytes.toBytes(row));
    p.setDurability(Durability.SKIP_WAL);
    for (String column: columns) {
      for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
        KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
            valueString);
        p.add(kv);
        kvList.add(kv);
        for (String s: prefixMap.keySet()) {
          if (column.startsWith(s)) {
            prefixMap.get(s).add(kv);
          }
        }
      }
    }
    region.put(p);
  }

  MultipleColumnPrefixFilter filter;
  Scan scan = new Scan();
  scan.setMaxVersions();
  byte [][] filter_prefix = new byte [2][];
  filter_prefix[0] = new byte [] {'p'};
  filter_prefix[1] = new byte [] {'q'};
  
  filter = new MultipleColumnPrefixFilter(filter_prefix);
  scan.setFilter(filter);
  List<Cell> results = new ArrayList<Cell>();
  InternalScanner scanner = region.getScanner(scan);
  // Drain the scanner; results accumulates every matching cell.
  while (scanner.next(results))
    ;
  scanner.close();
  assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());

  HRegion.closeHRegion(region);
}
 
Developer: fengchen8086, Project: ditb, Source: TestMultipleColumnPrefixFilter.java
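Examples 7 through 9 call a generateRandomWords helper that is defined elsewhere in the test class. A plausible sketch of what it does (a hypothetical reconstruction; the real helper may differ in detail):

// Hypothetical reconstruction: short random lowercase words, deduplicated
// via a Set, each ending in the given suffix ("row" or "column" above).
// Assumes java.util.{List, ArrayList, Set, HashSet} are imported.
List<String> generateRandomWords(int numberOfWords, String suffix) {
  Set<String> words = new HashSet<String>();
  for (int i = 0; i < numberOfWords; i++) {
    int length = (int) (Math.random() * 2) + 1;
    char[] chars = new char[length];
    for (int j = 0; j < length; j++) {
      chars[j] = (char) ('a' + (int) (Math.random() * 26));
    }
    words.add(new String(chars) + suffix);
  }
  return new ArrayList<String>(words);
}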

Example 8: testMultipleColumnPrefixFilterWithManyFamilies

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
@Test
public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException {
  String family1 = "Family1";
  String family2 = "Family2";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestMultipleColumnPrefixFilter"));
  HColumnDescriptor hcd1 = new HColumnDescriptor(family1);
  hcd1.setMaxVersions(3);
  htd.addFamily(hcd1);
  HColumnDescriptor hcd2 = new HColumnDescriptor(family2);
  hcd2.setMaxVersions(3);
  htd.addFamily(hcd2);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.
    getDataTestDir(), TEST_UTIL.getConfiguration(), htd);

  List<String> rows = generateRandomWords(100, "row");
  List<String> columns = generateRandomWords(10000, "column");
  long maxTimestamp = 3;

  List<Cell> kvList = new ArrayList<Cell>();

  Map<String, List<Cell>> prefixMap = new HashMap<String,
      List<Cell>>();

  prefixMap.put("p", new ArrayList<Cell>());
  prefixMap.put("q", new ArrayList<Cell>());
  prefixMap.put("s", new ArrayList<Cell>());

  String valueString = "ValueString";

  for (String row: rows) {
    Put p = new Put(Bytes.toBytes(row));
    p.setDurability(Durability.SKIP_WAL);
    for (String column: columns) {
      for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
        double rand = Math.random();
        Cell kv;
        if (rand < 0.5) {
          kv = KeyValueTestUtil.create(row, family1, column, timestamp, valueString);
        } else {
          kv = KeyValueTestUtil.create(row, family2, column, timestamp, valueString);
        }
        p.add(kv);
        kvList.add(kv);
        for (String s: prefixMap.keySet()) {
          if (column.startsWith(s)) {
            prefixMap.get(s).add(kv);
          }
        }
      }
    }
    region.put(p);
  }

  MultipleColumnPrefixFilter filter;
  Scan scan = new Scan();
  scan.setMaxVersions();
  byte [][] filter_prefix = new byte [2][];
  filter_prefix[0] = new byte [] {'p'};
  filter_prefix[1] = new byte [] {'q'};
  
  filter = new MultipleColumnPrefixFilter(filter_prefix);
  scan.setFilter(filter);
  List<Cell> results = new ArrayList<Cell>();
  InternalScanner scanner = region.getScanner(scan);
  // Drain the scanner; results accumulates every matching cell.
  while (scanner.next(results))
    ;
  scanner.close();
  assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());

  HRegion.closeHRegion(region);
}
 
Developer: fengchen8086, Project: ditb, Source: TestMultipleColumnPrefixFilter.java

Example 9: testMultipleColumnPrefixFilterWithColumnPrefixFilter

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
@Test
public void testMultipleColumnPrefixFilterWithColumnPrefixFilter() throws IOException {
  String family = "Family";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestMultipleColumnPrefixFilter"));
  htd.addFamily(new HColumnDescriptor(family));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.
    getDataTestDir(), TEST_UTIL.getConfiguration(),htd);

  List<String> rows = generateRandomWords(100, "row");
  List<String> columns = generateRandomWords(10000, "column");
  long maxTimestamp = 2;

  String valueString = "ValueString";

  for (String row: rows) {
    Put p = new Put(Bytes.toBytes(row));
    p.setDurability(Durability.SKIP_WAL);
    for (String column: columns) {
      for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
        KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
            valueString);
        p.add(kv);
      }
    }
    region.put(p);
  }

  MultipleColumnPrefixFilter multiplePrefixFilter;
  Scan scan1 = new Scan();
  scan1.setMaxVersions();
  byte [][] filter_prefix = new byte [1][];
  filter_prefix[0] = new byte [] {'p'};
 
  multiplePrefixFilter = new MultipleColumnPrefixFilter(filter_prefix);
  scan1.setFilter(multiplePrefixFilter);
  List<Cell> results1 = new ArrayList<Cell>();
  InternalScanner scanner1 = region.getScanner(scan1);
  while (scanner1.next(results1))
    ;
  scanner1.close();

  ColumnPrefixFilter singlePrefixFilter;
  Scan scan2 = new Scan();
  scan2.setMaxVersions();
  singlePrefixFilter = new ColumnPrefixFilter(Bytes.toBytes("p"));

  scan2.setFilter(singlePrefixFilter);
  List<Cell> results2 = new ArrayList<Cell>();
  // scan2 carries the ColumnPrefixFilter; scanning scan1 again here would
  // compare the MultipleColumnPrefixFilter against itself.
  InternalScanner scanner2 = region.getScanner(scan2);
  while (scanner2.next(results2))
    ;
  scanner2.close();

  assertEquals(results1.size(), results2.size());

  HRegion.closeHRegion(region);
}
 
Developer: fengchen8086, Project: ditb, Source: TestMultipleColumnPrefixFilter.java

Example 10: testScannerSelection

import org.apache.hadoop.hbase.regionserver.HRegion; // import for the method's declaring class
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd =
    new HColumnDescriptor(FAMILY_BYTES)
        .setMaxVersions(Integer.MAX_VALUE)
        .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(info.getEncodedName()),
          conf, htd);

  long ts = EnvironmentEdgeManager.currentTime();
  long version = 0; // make sure each new batch of Puts gets a distinct timestamp
  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
      version += TTL_MS;
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            ts + version, Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flush(true);
    version++;
  }

  Scan scan = new Scan();
  scan.setMaxVersions(Integer.MAX_VALUE);
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    HStore store = (HStore)region.getStore(FAMILY_BYTES);
    store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles);
  } else {
    region.compact(false);
  }

  region.close();
}
 
Developer: fengchen8086, Project: ditb, Source: TestScannerSelectionUsingTTL.java
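Example 10 references several class-level fields (TTL_SECONDS, TTL_MS, NUM_EXPIRED_FILES, totalNumFiles, numFreshFiles, explicitCompaction, NUM_ROWS, NUM_COLS_PER_ROW) that live outside the snippet. Plausible declarations, shown purely for orientation; every value below is an assumption, since the real test derives them from its parameterized setup:

// Illustrative values only; treat every number here as an assumption.
private static final int NUM_ROWS = 8;
private static final int NUM_COLS_PER_ROW = 5;
private static final int TTL_SECONDS = 2;
private static final int TTL_MS = TTL_SECONDS * 1000;
private static final int NUM_EXPIRED_FILES = 2;
private final int numFreshFiles = 3;
private final int totalNumFiles = NUM_EXPIRED_FILES + numFreshFiles;
private final boolean explicitCompaction = false; // parameterized per test run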


Note: The org.apache.hadoop.hbase.regionserver.HRegion.getScanner examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community-contributed open-source projects; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.