

Java RegionScannerImpl Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl. If you are wondering what RegionScannerImpl does, how it is used, or what real usage looks like, the curated class examples below should help.


RegionScannerImpl is an inner class of org.apache.hadoop.hbase.regionserver.HRegion. Eight code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
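
All of the examples share one core pattern: build a Scan, obtain the scanner from the HRegion, and drain it with next(List&lt;Cell&gt;). The following minimal sketch shows that pattern in isolation; the class and method names are placeholders, and the cast-free getScanner return type assumes a recent HBase 2.x codebase (older versions return RegionScanner and need a cast, as in Examples 1, 4, and 5).

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;

public class RegionScannerImplSketch {
  // Drains a region scanner row by row; "region" is assumed to be an
  // already-opened HRegion, e.g. one created by a test utility.
  static List<Cell> scanAll(HRegion region, Scan scan) throws IOException {
    List<Cell> allCells = new ArrayList<>();
    // In recent HBase versions HRegion.getScanner(Scan) is declared to return
    // RegionScannerImpl directly; older versions need an explicit cast.
    RegionScannerImpl scanner = region.getScanner(scan);
    try {
      List<Cell> row = new ArrayList<>();
      boolean more;
      do {
        more = scanner.next(row);   // cells of the next row, if any
        allCells.addAll(row);
        row.clear();
      } while (more);
    } finally {
      scanner.close();
    }
    return allCells;
  }
}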

Example 1: scanColSet

import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; // import the required package/class
private void scanColSet(int[] colSet, int[] expectedResultCols)
    throws IOException {
  LOG.info("Scanning column set: " + Arrays.toString(colSet));
  Scan scan = new Scan(ROW_BYTES, ROW_BYTES);
  addColumnSetToScan(scan, colSet);
  RegionScannerImpl scanner = (RegionScannerImpl) region.getScanner(scan);
  KeyValueHeap storeHeap = scanner.getStoreHeapForTesting();
  assertEquals(0, storeHeap.getHeap().size());
  StoreScanner storeScanner =
      (StoreScanner) storeHeap.getCurrentForTesting();
  @SuppressWarnings({ "unchecked", "rawtypes" })
  List<StoreFileScanner> scanners = (List<StoreFileScanner>)
      (List) storeScanner.getAllScannersForTesting();

  // Sort scanners by their HFile's modification time.
  Collections.sort(scanners, new Comparator<StoreFileScanner>() {
    @Override
    public int compare(StoreFileScanner s1, StoreFileScanner s2) {
      Path p1 = s1.getReader().getHFileReader().getPath();
      Path p2 = s2.getReader().getHFileReader().getPath();
      long t1, t2;
      try {
        t1 = fs.getFileStatus(p1).getModificationTime();
        t2 = fs.getFileStatus(p2).getModificationTime();
      } catch (IOException ex) {
        throw new RuntimeException(ex);
      }
      return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
    }
  });

  StoreFile.Reader lastStoreFileReader = null;
  for (StoreFileScanner sfScanner : scanners)
    lastStoreFileReader = sfScanner.getReader();

  new HFilePrettyPrinter(conf).run(new String[]{ "-m", "-p", "-f",
      lastStoreFileReader.getHFileReader().getPath().toString()});

  // Disable Bloom filter for the last store file. The disabled Bloom filter
  // will always return "true".
  LOG.info("Disabling Bloom filter for: "
      + lastStoreFileReader.getHFileReader().getName());
  lastStoreFileReader.disableBloomFilterForTesting();

  List<Cell> allResults = new ArrayList<Cell>();

  { // Limit the scope of results.
    List<Cell> results = new ArrayList<Cell>();
    while (scanner.next(results) || results.size() > 0) {
      allResults.addAll(results);
      results.clear();
    }
  }

  List<Integer> actualIds = new ArrayList<Integer>();
  for (Cell kv : allResults) {
    String qual = Bytes.toString(CellUtil.cloneQualifier(kv));
    assertTrue(qual.startsWith(QUALIFIER_PREFIX));
    actualIds.add(Integer.valueOf(qual.substring(
        QUALIFIER_PREFIX.length())));
  }
  List<Integer> expectedIds = new ArrayList<Integer>();
  for (int expectedId : expectedResultCols)
    expectedIds.add(expectedId);

  LOG.info("Column ids returned: " + actualIds + ", expected: "
      + expectedIds);
  assertEquals(expectedIds.toString(), actualIds.toString());
}
 
Developer: fengchen8086 | Project: ditb | Lines: 70 | Source: TestScanWithBloomError.java

Example 2: newRegionScanner

import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; // import the required package/class
private RegionScannerHolder newRegionScanner(ScanRequest request, ScanResponse.Builder builder)
    throws IOException {
  HRegion region = getRegion(request.getRegion());
  ClientProtos.Scan protoScan = request.getScan();
  boolean isLoadingCfsOnDemandSet = protoScan.hasLoadColumnFamiliesOnDemand();
  Scan scan = ProtobufUtil.toScan(protoScan);
  // if the request doesn't set this, get the default region setting.
  if (!isLoadingCfsOnDemandSet) {
    scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
  }

  if (!scan.hasFamilies()) {
    // Adding all families to scanner
    for (byte[] family : region.getTableDescriptor().getColumnFamilyNames()) {
      scan.addFamily(family);
    }
  }
  if (region.getCoprocessorHost() != null) {
    // preScannerOpen is not allowed to return a RegionScanner. Only post hook can create a
    // wrapper for the core created RegionScanner
    region.getCoprocessorHost().preScannerOpen(scan);
  }
  RegionScannerImpl coreScanner = region.getScanner(scan);
  Shipper shipper = coreScanner;
  RegionScanner scanner = coreScanner;
  if (region.getCoprocessorHost() != null) {
    scanner = region.getCoprocessorHost().postScannerOpen(scan, scanner);
  }
  long scannerId = scannerIdGenerator.generateNewScannerId();
  builder.setScannerId(scannerId);
  builder.setMvccReadPoint(scanner.getMvccReadPoint());
  builder.setTtl(scannerLeaseTimeoutPeriod);
  String scannerName = String.valueOf(scannerId);
  return addScanner(scannerName, scanner, shipper, region, scan.isNeedCursorResult());
}
 
Developer: apache | Project: hbase | Lines: 36 | Source: RSRpcServices.java
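
Example 2 is the server-side RPC path: the region creates the core RegionScannerImpl, and the coprocessor host's postScannerOpen hook is then given a chance to wrap it before the scanner is registered and handed back to the client. A minimal hedged sketch of a RegionObserver that participates in that hook (the class name and the no-op behavior are illustrative, assuming the HBase 2.x coprocessor API):

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

public class PassThroughScanObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  // Invoked (via the coprocessor host) after the core RegionScannerImpl is
  // created; it may return the scanner unchanged or return a wrapper around it,
  // which is what RSRpcServices then serves to the client.
  @Override
  public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
      Scan scan, RegionScanner s) throws IOException {
    // Sketch: simply return the core scanner; a real observer might wrap it.
    return s;
  }
}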

Example 3: instantiateRegionScanner

import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; // import the required package/class
@Override
protected RegionScannerImpl instantiateRegionScanner(Scan scan,
    List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce) throws IOException {
  if (scan.isReversed()) {
    if (scan.getFilter() != null) {
      scan.getFilter().setReversed(true);
    }
    return new HeartbeatReversedRegionScanner(scan, additionalScanners, this);
  }
  return new HeartbeatRegionScanner(scan, additionalScanners, this);
}
 
Developer: apache | Project: hbase | Lines: 12 | Source: TestScannerHeartbeatMessages.java
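
An override like this only takes effect if HBase actually instantiates the custom HRegion subclass. In the HBase test suite this is typically done by pointing the region-implementation config key at the subclass; a hedged sketch follows (the helper class name is a placeholder, and the use of HConstants.REGION_IMPL is an assumption based on how tests such as TestScannerHeartbeatMessages wire in their custom regions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;

public class CustomRegionConfig {
  // Returns a configuration that makes the region server construct the given
  // HRegion subclass (e.g. one overriding instantiateRegionScanner) instead of
  // the default HRegion.
  static Configuration withRegionImpl(Class<? extends HRegion> regionClass) {
    Configuration conf = HBaseConfiguration.create();
    // HConstants.REGION_IMPL ("hbase.hregion.impl") selects the region class;
    // assumed here, following the pattern used in HBase's own tests.
    conf.setClass(HConstants.REGION_IMPL, regionClass, Region.class);
    return conf;
  }
}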

Example 4: scanColSet

import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; // import the required package/class
private void scanColSet(int[] colSet, int[] expectedResultCols)
    throws IOException {
  LOG.info("Scanning column set: " + Arrays.toString(colSet));
  Scan scan = new Scan(ROW_BYTES, ROW_BYTES);
  addColumnSetToScan(scan, colSet);
  RegionScannerImpl scanner = (RegionScannerImpl) region.getScanner(scan);
  KeyValueHeap storeHeap = scanner.getStoreHeapForTesting();
  assertEquals(0, storeHeap.getHeap().size());
  StoreScanner storeScanner =
      (StoreScanner) storeHeap.getCurrentForTesting();
  @SuppressWarnings({ "unchecked", "rawtypes" })
  List<StoreFileScanner> scanners = (List<StoreFileScanner>)
      (List) storeScanner.getAllScannersForTesting();

  // Sort scanners by their HFile's modification time.
  Collections.sort(scanners, new Comparator<StoreFileScanner>() {
    @Override
    public int compare(StoreFileScanner s1, StoreFileScanner s2) {
      Path p1 = s1.getReaderForTesting().getHFileReader().getPath();
      Path p2 = s2.getReaderForTesting().getHFileReader().getPath();
      long t1, t2;
      try {
        t1 = fs.getFileStatus(p1).getModificationTime();
        t2 = fs.getFileStatus(p2).getModificationTime();
      } catch (IOException ex) {
        throw new RuntimeException(ex);
      }
      return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
    }
  });

  StoreFile.Reader lastStoreFileReader = null;
  for (StoreFileScanner sfScanner : scanners)
    lastStoreFileReader = sfScanner.getReaderForTesting();

  new HFilePrettyPrinter().run(new String[]{ "-m", "-p", "-f",
      lastStoreFileReader.getHFileReader().getPath().toString()});

  // Disable Bloom filter for the last store file. The disabled Bloom filter
  // will always return "true".
  LOG.info("Disabling Bloom filter for: "
      + lastStoreFileReader.getHFileReader().getName());
  lastStoreFileReader.disableBloomFilterForTesting();

  List<KeyValue> allResults = new ArrayList<KeyValue>();

  { // Limit the scope of results.
    List<KeyValue> results = new ArrayList<KeyValue>();
    while (scanner.next(results) || results.size() > 0) {
      allResults.addAll(results);
      results.clear();
    }
  }

  List<Integer> actualIds = new ArrayList<Integer>();
  for (KeyValue kv : allResults) {
    String qual = Bytes.toString(kv.getQualifier());
    assertTrue(qual.startsWith(QUALIFIER_PREFIX));
    actualIds.add(Integer.valueOf(qual.substring(
        QUALIFIER_PREFIX.length())));
  }
  List<Integer> expectedIds = new ArrayList<Integer>();
  for (int expectedId : expectedResultCols)
    expectedIds.add(expectedId);

  LOG.info("Column ids returned: " + actualIds + ", expected: "
      + expectedIds);
  assertEquals(expectedIds.toString(), actualIds.toString());
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 70 | Source: TestScanWithBloomError.java

Example 5: scanColSet

import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; // import the required package/class
private void scanColSet(int[] colSet, int[] expectedResultCols)
    throws IOException {
  LOG.info("Scanning column set: " + Arrays.toString(colSet));
  Scan scan = new Scan(ROW_BYTES, ROW_BYTES);
  addColumnSetToScan(scan, colSet);
  RegionScannerImpl scanner = (RegionScannerImpl) region.getScanner(scan);
  KeyValueHeap storeHeap = scanner.getStoreHeapForTesting();
  assertEquals(0, storeHeap.getHeap().size());
  StoreScanner storeScanner =
      (StoreScanner) storeHeap.getCurrentForTesting();
  @SuppressWarnings({ "unchecked", "rawtypes" })
  List<StoreFileScanner> scanners = (List<StoreFileScanner>)
      (List) storeScanner.getAllScannersForTesting();

  // Sort scanners by their HFile's modification time.
  Collections.sort(scanners, new Comparator<StoreFileScanner>() {
    @Override
    public int compare(StoreFileScanner s1, StoreFileScanner s2) {
      Path p1 = s1.getReaderForTesting().getHFileReader().getPath();
      Path p2 = s2.getReaderForTesting().getHFileReader().getPath();
      long t1, t2;
      try {
        t1 = fs.getFileStatus(p1).getModificationTime();
        t2 = fs.getFileStatus(p2).getModificationTime();
      } catch (IOException ex) {
        throw new RuntimeException(ex);
      }
      return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
    }
  });

  StoreFile.Reader lastStoreFileReader = null;
  for (StoreFileScanner sfScanner : scanners)
    lastStoreFileReader = sfScanner.getReaderForTesting();

  new HFilePrettyPrinter().run(new String[]{ "-m", "-p", "-f",
      lastStoreFileReader.getHFileReader().getPath().toString()});

  // Disable Bloom filter for the last store file. The disabled Bloom filter
  // will always return "true".
  LOG.info("Disabling Bloom filter for: "
      + lastStoreFileReader.getHFileReader().getName());
  lastStoreFileReader.disableBloomFilterForTesting();

  List<Cell> allResults = new ArrayList<Cell>();

  { // Limit the scope of results.
    List<Cell> results = new ArrayList<Cell>();
    while (scanner.next(results) || results.size() > 0) {
      allResults.addAll(results);
      results.clear();
    }
  }

  List<Integer> actualIds = new ArrayList<Integer>();
  for (Cell kv : allResults) {
    String qual = Bytes.toString(CellUtil.cloneQualifier(kv));
    assertTrue(qual.startsWith(QUALIFIER_PREFIX));
    actualIds.add(Integer.valueOf(qual.substring(
        QUALIFIER_PREFIX.length())));
  }
  List<Integer> expectedIds = new ArrayList<Integer>();
  for (int expectedId : expectedResultCols)
    expectedIds.add(expectedId);

  LOG.info("Column ids returned: " + actualIds + ", expected: "
      + expectedIds);
  assertEquals(expectedIds.toString(), actualIds.toString());
}
 
Developer: tenggyut | Project: HIndex | Lines: 70 | Source: TestScanWithBloomError.java

Example 6: testGetScanner_WithNoFamilies

import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; // import the required package/class
@Test
public void testGetScanner_WithNoFamilies() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] fam2 = Bytes.toBytes("fam2");
  byte[] fam3 = Bytes.toBytes("fam3");
  byte[] fam4 = Bytes.toBytes("fam4");

  byte[][] families = { fam1, fam2, fam3, fam4 };

  // Setting up region
  this.region = initHRegion(tableName, method, CONF, families);
  try {

    // Putting data in Region
    Put put = new Put(row1);
    put.addColumn(fam1, null, null);
    put.addColumn(fam2, null, null);
    put.addColumn(fam3, null, null);
    put.addColumn(fam4, null, null);
    region.put(put);

    Scan scan = null;
    HRegion.RegionScannerImpl is = null;

    // Check how many scanners getScanner produces: with 2 families requested,
    // one scanner becomes the heap's "current", leaving 1 in the heap.
    scan = new Scan();
    scan.addFamily(fam2);
    scan.addFamily(fam4);
    is = region.getScanner(scan);
    assertEquals(1, is.storeHeap.getHeap().size());

    scan = new Scan();
    is = region.getScanner(scan);
    assertEquals(families.length - 1, is.storeHeap.getHeap().size());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}
 
Developer: apache | Project: hbase | Lines: 43 | Source: TestHRegion.java

Example 7: testReverseScanShouldNotScanMemstoreIfReadPtLesser

import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; // import the required package/class
@Test
public void testReverseScanShouldNotScanMemstoreIfReadPtLesser() throws Exception {
  byte[] cf1 = Bytes.toBytes("CF1");
  byte[][] families = { cf1 };
  byte[] col = Bytes.toBytes("C");
  HBaseConfiguration conf = new HBaseConfiguration();
  this.region = initHRegion(tableName, method, conf, families);
  try {
    // setup with one storefile and one memstore, to create scanner and get an earlier readPt
    Put put = new Put(Bytes.toBytes("19996"));
    put.addColumn(cf1, col, Bytes.toBytes("val"));
    region.put(put);
    Put put2 = new Put(Bytes.toBytes("19995"));
    put2.addColumn(cf1, col, Bytes.toBytes("val"));
    region.put(put2);
    // create a reverse scan
    Scan scan = new Scan(Bytes.toBytes("19996"));
    scan.setReversed(true);
    RegionScannerImpl scanner = region.getScanner(scan);

    // flush the cache. This will reset the store scanner
    region.flushcache(true, true, FlushLifeCycleTracker.DUMMY);

    // create one memstore contains many rows will be skipped
    // to check MemStoreScanner.seekToPreviousRow
    for (int i = 10000; i < 20000; i++) {
      Put p = new Put(Bytes.toBytes("" + i));
      p.addColumn(cf1, col, Bytes.toBytes("" + i));
      region.put(p);
    }
    List<Cell> currRow = new ArrayList<>();
    boolean hasNext;
    boolean assertDone = false;
    do {
      hasNext = scanner.next(currRow);
      // With HBASE-15871, after the scanner is reset the memstore scanner should not be
      // added here
      if (!assertDone) {
        StoreScanner current =
            (StoreScanner) (scanner.storeHeap).getCurrentForTesting();
        List<KeyValueScanner> scanners = current.getAllScannersForTesting();
        assertEquals("There should be only one scanner the store file scanner", 1,
          scanners.size());
        assertDone = true;
      }
    } while (hasNext);
    assertEquals(2, currRow.size());
    assertEquals("19996", Bytes.toString(currRow.get(0).getRowArray(),
      currRow.get(0).getRowOffset(), currRow.get(0).getRowLength()));
    assertEquals("19995", Bytes.toString(currRow.get(1).getRowArray(),
      currRow.get(1).getRowOffset(), currRow.get(1).getRowLength()));
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}
 
Developer: apache | Project: hbase | Lines: 57 | Source: TestHRegion.java

Example 8: scanColSet

import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; // import the required package/class
private void scanColSet(int[] colSet, int[] expectedResultCols)
    throws IOException {
  LOG.info("Scanning column set: " + Arrays.toString(colSet));
  Scan scan = new Scan(ROW_BYTES, ROW_BYTES);
  addColumnSetToScan(scan, colSet);
  RegionScannerImpl scanner = region.getScanner(scan);
  KeyValueHeap storeHeap = scanner.getStoreHeapForTesting();
  assertEquals(0, storeHeap.getHeap().size());
  StoreScanner storeScanner =
      (StoreScanner) storeHeap.getCurrentForTesting();
  @SuppressWarnings({ "unchecked", "rawtypes" })
  List<StoreFileScanner> scanners = (List<StoreFileScanner>)
      (List) storeScanner.getAllScannersForTesting();

  // Sort scanners by their HFile's modification time.
  Collections.sort(scanners, new Comparator<StoreFileScanner>() {
    @Override
    public int compare(StoreFileScanner s1, StoreFileScanner s2) {
      Path p1 = s1.getReader().getHFileReader().getPath();
      Path p2 = s2.getReader().getHFileReader().getPath();
      long t1, t2;
      try {
        t1 = fs.getFileStatus(p1).getModificationTime();
        t2 = fs.getFileStatus(p2).getModificationTime();
      } catch (IOException ex) {
        throw new RuntimeException(ex);
      }
      return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
    }
  });

  StoreFileReader lastStoreFileReader = null;
  for (StoreFileScanner sfScanner : scanners)
    lastStoreFileReader = sfScanner.getReader();

  new HFilePrettyPrinter(conf).run(new String[]{ "-m", "-p", "-f",
      lastStoreFileReader.getHFileReader().getPath().toString()});

  // Disable Bloom filter for the last store file. The disabled Bloom filter
  // will always return "true".
  LOG.info("Disabling Bloom filter for: "
      + lastStoreFileReader.getHFileReader().getName());
  lastStoreFileReader.disableBloomFilterForTesting();

  List<Cell> allResults = new ArrayList<>();

  { // Limit the scope of results.
    List<Cell> results = new ArrayList<>();
    while (scanner.next(results) || results.size() > 0) {
      allResults.addAll(results);
      results.clear();
    }
  }

  List<Integer> actualIds = new ArrayList<>();
  for (Cell kv : allResults) {
    String qual = Bytes.toString(CellUtil.cloneQualifier(kv));
    assertTrue(qual.startsWith(QUALIFIER_PREFIX));
    actualIds.add(Integer.valueOf(qual.substring(
        QUALIFIER_PREFIX.length())));
  }
  List<Integer> expectedIds = new ArrayList<>();
  for (int expectedId : expectedResultCols)
    expectedIds.add(expectedId);

  LOG.info("Column ids returned: " + actualIds + ", expected: "
      + expectedIds);
  assertEquals(expectedIds.toString(), actualIds.toString());
}
 
Developer: apache | Project: hbase | Lines: 70 | Source: TestScanWithBloomError.java


Note: the org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl class examples in this article were compiled by 纯净天空 (vimsky) from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Refer to each project's license before redistributing or using the code, and do not republish without permission.