

Java InternalScanner.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.InternalScanner.close. If you are wondering what InternalScanner.close does, how to call it, or where to find sample code for it, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.InternalScanner.


Below are 15 code examples of the InternalScanner.close method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
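
Before going through the examples, here is a minimal sketch of the pattern they all share: obtain an InternalScanner from a Region, drain it row by row with next(), and always release it with close() in a finally block. This sketch is not taken from any of the projects listed below; the helper name countCells is hypothetical, while Region.getScanner, InternalScanner.next and InternalScanner.close are the HBase APIs the examples exercise.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Region;

public class InternalScannerCloseSketch {
  /**
   * Counts all cells visible to the given scan. The scanner is closed in the
   * finally block so that the region's resources are released even if next()
   * throws. (Hypothetical helper, for illustration only.)
   */
  static long countCells(Region region, Scan scan) throws IOException {
    InternalScanner scanner = null;
    try {
      scanner = region.getScanner(scan);
      List<Cell> cells = new ArrayList<Cell>();
      long count = 0;
      boolean hasMore;
      do {
        hasMore = scanner.next(cells); // fills 'cells' with the next row's cells
        count += cells.size();
        cells.clear();                 // reuse the buffer for the next row
      } while (hasMore);
      return count;
    } finally {
      if (scanner != null) {
        scanner.close();               // the method this article is about
      }
    }
  }
}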

Example 1: getFromStoreFile

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
/**
 * Do a small get/scan against one store. This is required because store
 * has no actual methods of querying itself, and relies on StoreScanner.
 */
public static List<Cell> getFromStoreFile(HStore store,
                                              Get get) throws IOException {
  Scan scan = new Scan(get);
  InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getFamily().getName()),
      // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
      // readpoint 0.
      0);

  List<Cell> result = new ArrayList<Cell>();
  scanner.next(result);
  if (!result.isEmpty()) {
    // verify that we are on the row we want:
    Cell kv = result.get(0);
    if (!CellUtil.matchingRow(kv, get.getRow())) {
      result.clear();
    }
  }
  scanner.close();
  return result;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: HBaseTestingUtility.java

Example 2: testWhileMatchFilterWithFilterRowKeyWithReverseScan

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
public void testWhileMatchFilterWithFilterRowKeyWithReverseScan()
    throws Exception {
  Scan s = new Scan();
  String prefix = "testRowOne";
  WhileMatchFilter filter = new WhileMatchFilter(new PrefixFilter(
      Bytes.toBytes(prefix)));
  s.setFilter(filter);
  s.setReversed(true);

  InternalScanner scanner = this.region.getScanner(s);
  while (true) {
    ArrayList<Cell> values = new ArrayList<Cell>();
    boolean isMoreResults = scanner.next(values);
    if (!isMoreResults
        || !Bytes.toString(values.get(0).getRow()).startsWith(prefix)) {
      Assert.assertTrue(
          "The WhileMatchFilter should now filter all remaining",
          filter.filterAllRemaining());
    }
    if (!isMoreResults) {
      break;
    }
  }
  scanner.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: TestFilter.java

Example 3: sum

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
@Override
public long sum(byte[] family, byte[] qualifier)
throws IOException {
  // aggregate at each region
  Scan scan = new Scan();
  scan.addColumn(family, qualifier);
  int sumResult = 0;

  InternalScanner scanner = ((RegionCoprocessorEnvironment)getEnvironment())
      .getRegion().getScanner(scan);
  try {
    List<KeyValue> curVals = new ArrayList<KeyValue>();
    boolean done = false;
    do {
      curVals.clear();
      done = scanner.next(curVals);
      KeyValue kv = curVals.get(0);
      sumResult += Bytes.toInt(kv.getBuffer(), kv.getValueOffset());
    } while (done);
  } finally {
    scanner.close();
  }
  return sumResult;
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 25, Source: ColumnAggregationEndpoint.java

Example 4: loadAll

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
/**
 * Loads all of the permission grants stored in a region of the {@code _acl_}
 * table.
 *
 * @param aclRegion
 * @return a map of the permissions for this table.
 * @throws IOException
 */
static Map<byte[], ListMultimap<String,TablePermission>> loadAll(Region aclRegion)
  throws IOException {

  if (!isAclRegion(aclRegion)) {
    throw new IOException("Can only load permissions from "+ACL_TABLE_NAME);
  }

  Map<byte[], ListMultimap<String, TablePermission>> allPerms =
      new TreeMap<byte[], ListMultimap<String, TablePermission>>(Bytes.BYTES_RAWCOMPARATOR);

  // do a full scan of _acl_ table

  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);

  InternalScanner iScanner = null;
  try {
    iScanner = aclRegion.getScanner(scan);

    while (true) {
      List<Cell> row = new ArrayList<Cell>();

      boolean hasNext = iScanner.next(row);
      ListMultimap<String,TablePermission> perms = ArrayListMultimap.create();
      byte[] entry = null;
      for (Cell kv : row) {
        if (entry == null) {
          entry = CellUtil.cloneRow(kv);
        }
        Pair<String,TablePermission> permissionsOfUserOnTable =
            parsePermissionRecord(entry, kv);
        if (permissionsOfUserOnTable != null) {
          String username = permissionsOfUserOnTable.getFirst();
          TablePermission permissions = permissionsOfUserOnTable.getSecond();
          perms.put(username, permissions);
        }
      }
      if (entry != null) {
        allPerms.put(entry, perms);
      }
      if (!hasNext) {
        break;
      }
    }
  } finally {
    if (iScanner != null) {
      iScanner.close();
    }
  }

  return allPerms;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 61, Source: AccessControlLists.java

Example 5: doScan

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
public static void doScan(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  InternalScanner scanner = null;
  try {
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result);
  } finally {
    if (scanner != null) scanner.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 13, Source: TestRowProcessorEndpoint.java

Example 6: verifyMerge

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
private void verifyMerge(final HRegion merged, final int upperbound)
throws IOException {
  //Test
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  InternalScanner scanner = merged.getScanner(scan);
  try {
    List<Cell> testRes = null;
    while (true) {
      testRes = new ArrayList<Cell>();
      boolean hasNext = scanner.next(testRes);
      if (!hasNext) {
        break;
      }
    }
  } finally {
    scanner.close();
  }

  //!Test

  for (int i = 0; i < upperbound; i++) {
    for (int j = 0; j < rows[i].length; j++) {
      Get get = new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result = merged.get(get);
      assertEquals(1, result.size());
      byte [] bytes = CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(Bytes.toStringBinary(rows[i][j]), bytes);
      assertTrue(Bytes.equals(bytes, rows[i][j]));
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source: TestMergeTool.java

Example 7: testWhileMatchFilterWithFilterRowWithReverseScan

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
public void testWhileMatchFilterWithFilterRowWithReverseScan()
    throws Exception {
  final int pageSize = 4;

  Scan s = new Scan();
  s.setReversed(true);
  WhileMatchFilter filter = new WhileMatchFilter(new PageFilter(pageSize));
  s.setFilter(filter);

  InternalScanner scanner = this.region.getScanner(s);
  int scannerCounter = 0;
  while (true) {
    boolean isMoreResults = scanner.next(new ArrayList<Cell>());
    scannerCounter++;

    if (scannerCounter >= pageSize) {
      Assert.assertTrue(
          "The WhileMatchFilter should now filter all remaining",
          filter.filterAllRemaining());
    }
    if (!isMoreResults) {
      break;
    }
  }
  scanner.close();
  Assert.assertEquals("The page filter returned more rows than expected",
      pageSize, scannerCounter);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: TestFilter.java

Example 8: testScannerSelection

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flush(true);
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig.blockCacheDisabled = false;
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(expectedCount, accessedFiles.size());
  region.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 39, Source: TestScannerSelectionUsingKeyRange.java

Example 9: getRowCount

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
/**
 * Returns a count of the rows in the region where this coprocessor is loaded.
 */
@Override
public void getRowCount(RpcController controller, ExampleProtos.CountRequest request,
                        RpcCallback<ExampleProtos.CountResponse> done) {
  Scan scan = new Scan();
  scan.setFilter(new FirstKeyOnlyFilter());
  ExampleProtos.CountResponse response = null;
  InternalScanner scanner = null;
  try {
    scanner = env.getRegion().getScanner(scan);
    List<Cell> results = new ArrayList<Cell>();
    boolean hasMore = false;
    byte[] lastRow = null;
    long count = 0;
    do {
      hasMore = scanner.next(results);
      for (Cell kv : results) {
        byte[] currentRow = CellUtil.cloneRow(kv);
        if (lastRow == null || !Bytes.equals(lastRow, currentRow)) {
          lastRow = currentRow;
          count++;
        }
      }
      results.clear();
    } while (hasMore);

    response = ExampleProtos.CountResponse.newBuilder()
        .setCount(count).build();
  } catch (IOException ioe) {
    ResponseConverter.setControllerException(controller, ioe);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException ignored) {}
    }
  }
  done.run(response);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 42, Source: RowCountEndpoint.java

Example 10: getKeyValueCount

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
/**
 * Returns a count of all KeyValues in the region where this coprocessor is loaded.
 */
@Override
public void getKeyValueCount(RpcController controller, ExampleProtos.CountRequest request,
                             RpcCallback<ExampleProtos.CountResponse> done) {
  ExampleProtos.CountResponse response = null;
  InternalScanner scanner = null;
  try {
    scanner = env.getRegion().getScanner(new Scan());
    List<Cell> results = new ArrayList<Cell>();
    boolean hasMore = false;
    long count = 0;
    do {
      hasMore = scanner.next(results);
      for (Cell kv : results) {
        count++;
      }
      results.clear();
    } while (hasMore);

    response = ExampleProtos.CountResponse.newBuilder()
        .setCount(count).build();
  } catch (IOException ioe) {
    ResponseConverter.setControllerException(controller, ioe);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException ignored) {}
    }
  }
  done.run(response);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 35, Source: RowCountEndpoint.java

Example 11: getSum

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Override
public void getSum(RpcController controller, SumRequest request, RpcCallback done) {
	Scan scan = new Scan();
	scan.addFamily(Bytes.toBytes(request.getFamily()));
	scan.addColumn(Bytes.toBytes(request.getFamily()), Bytes.toBytes(request.getColumn()));
	SumResponse response = null;
	InternalScanner scanner = null;
	try {
		scanner = env.getRegion().getScanner(scan);
		List<Cell> results = new ArrayList<Cell>();
		boolean hasMore = false;
		long sum = 0L;
		do {
			hasMore = scanner.next(results);
			for (Cell cell : results) {
				sum = sum + Bytes.toLong(CellUtil.cloneValue(cell));
			}
			results.clear();
		} while (hasMore);

		response = SumResponse.newBuilder().setSum(sum).build();

	} catch (IOException ioe) {
		ResponseConverter.setControllerException(controller, ioe);
	} finally {
		if (scanner != null) {
			try {
				scanner.close();
			} catch (IOException ignored) {
			}
		}
	}
	done.run(response);
}
 
Developer ID: tspannhw, Project: hbasecoprocessor, Lines: 36, Source: SumEndPoint.java

Example 12: getMax

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
@Override
public <T, S> T getMax(ColumnInterpreter<T, S> ci, Scan scan)
    throws IOException {
  T temp;
  T max = null;
  InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment())
      .getRegion().getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  byte[] colFamily = scan.getFamilies()[0];
  byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst();
  // qualifier can be null.
  try {
    boolean hasMoreRows = false;
    do {
      hasMoreRows = scanner.next(results);
      for (KeyValue kv : results) {
        temp = ci.getValue(colFamily, qualifier, kv);
        max = (max == null || (temp != null && ci.compare(temp, max) > 0)) ? temp : max;
      }
      results.clear();
    } while (hasMoreRows);
  } finally {
    scanner.close();
  }
  log.info("Maximum from this region is "
      + ((RegionCoprocessorEnvironment) getEnvironment()).getRegion()
          .getRegionNameAsString() + ": " + max);
  return max;
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 30, Source: AggregateImplementation.java

Example 13: testScannerSelection

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig.blockCacheDisabled = false;
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(expectedCount, accessedFiles.size());
  region.close();
}
 
Developer ID: grokcoder, Project: pbase, Lines: 39, Source: TestScannerSelectionUsingKeyRange.java

Example 14: getRowNum

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
@Override
public <T, S> long getRowNum(ColumnInterpreter<T, S> ci, Scan scan)
    throws IOException {
  long counter = 0l;
  List<KeyValue> results = new ArrayList<KeyValue>();
  byte[] colFamily = scan.getFamilies()[0];
  byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst();
  if (scan.getFilter() == null && qualifier == null)
    scan.setFilter(new FirstKeyOnlyFilter());
  InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment())
      .getRegion().getScanner(scan);
  try {
    boolean hasMoreRows = false;
    do {
      hasMoreRows = scanner.next(results);
      if (results.size() > 0) {
        counter++;
      }
      results.clear();
    } while (hasMoreRows);
  } finally {
    scanner.close();
  }
  log.info("Row counter from this region is "
      + ((RegionCoprocessorEnvironment) getEnvironment()).getRegion()
          .getRegionNameAsString() + ": " + counter);
  return counter;
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 29, Source: AggregateImplementation.java

Example 15: getAvg

import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the package/class the method depends on
@Override
public <T, S> Pair<S, Long> getAvg(ColumnInterpreter<T, S> ci, Scan scan)
    throws IOException {
  S sumVal = null;
  Long rowCountVal = 0l;
  InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment())
      .getRegion().getScanner(scan);
  byte[] colFamily = scan.getFamilies()[0];
  byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst();
  List<KeyValue> results = new ArrayList<KeyValue>();
  boolean hasMoreRows = false;
  try {
    do {
      results.clear();
      hasMoreRows = scanner.next(results);
      for (KeyValue kv : results) {
        sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily,
            qualifier, kv)));
      }
      rowCountVal++;
    } while (hasMoreRows);
  } finally {
    scanner.close();
  }
  Pair<S, Long> pair = new Pair<S, Long>(sumVal, rowCountVal);
  return pair;
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 28, Source: AggregateImplementation.java


Note: The org.apache.hadoop.hbase.regionserver.InternalScanner.close method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code; do not reproduce this article without permission.