

Java ResultScanner.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.ResultScanner.close. If you are wondering what ResultScanner.close does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.ResultScanner.


The sections below present 15 code examples of the ResultScanner.close method, sorted by popularity.
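All of the examples share one pattern: obtain a ResultScanner from Table.getScanner, consume the Results, then release the server-side scanner with close(). Because ResultScanner implements Closeable, try-with-resources guarantees the close even when iteration throws. Here is a minimal sketch of that pattern using the HBase 1.0+ connection API; the table name "my_table" is a placeholder, not taken from the examples below:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerCloseSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("my_table")); // placeholder table
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        System.out.println(Bytes.toString(result.getRow()));
      }
    } // scanner.close() (and table/connection close) run automatically here, even on exceptions
  }
}

Several of the examples below predate try-with-resources and instead close the scanner in a finally block, which achieves the same guarantee.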

Example 1: getAll

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
/**
 * Get all records from a single table
 * 
 * @throws IOException
 */
public static Map<byte[], byte[]> getAll(String TableName, String ColumnFamily, String ColumnName)
		throws IOException {
	// byte[] keys compare by reference, so a HashMap would break content-based lookups
	Map<byte[], byte[]> tableContent = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
	Scan s = new Scan();
	s.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
	s.setMaxVersions(1);
	s.setCacheBlocks(false);
	ResultScanner rs = hbase_table.getScanner(s);
	for (Result r : rs) {
		byte[] key = r.getRow();
		byte[] value = r.getValue(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
		tableContent.put(key, value);
	}
	rs.close();
	return tableContent;
}
 
Developer ID: ItGql, Project: SparkIsax, Lines of code: 22, Source: HBaseUtils.java

Example 2: verify

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
private void verify(final Table table) throws IOException {
  Scan scan = new Scan();
  scan.addColumn(FAMILY_NAME, COLUMN_NAME);
  scan.setMaxVersions(1);
  ResultScanner scanner = table.getScanner(scan);
  for (Result r: scanner) {
    for (Cell kv : r.listCells()) {
      log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv))
          + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv))
          + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv)));
      org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()),
        (Boolean)Bytes.toBoolean(CellUtil.cloneValue(kv)));
    }
  }
  scanner.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 17, Source: TestTimeRangeMapRed.java

Example 3: getCellHeapSize

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
/**
 * @return The approximate heap size of a cell in the test table. All cells should have
 *         approximately the same heap size, so the value is cached to avoid repeating the
 *         calculation
 * @throws Exception
 */
private long getCellHeapSize() throws Exception {
  if (CELL_HEAP_SIZE == -1) {
    // Do a partial scan that will return a single result with a single cell
    Scan scan = new Scan();
    scan.setMaxResultSize(1);
    scan.setAllowPartialResults(true);
    ResultScanner scanner = TABLE.getScanner(scan);

    Result result = scanner.next();

    assertTrue(result != null);
    assertTrue(result.rawCells() != null);
    assertTrue(result.rawCells().length == 1);

    CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0]);
    scanner.close();
  }

  return CELL_HEAP_SIZE;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 27, Source: TestServerSideScanMetricsFromClientSide.java

Example 4: testExpectedValuesOfPartialResults

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
public void testExpectedValuesOfPartialResults(boolean reversed) throws Exception {
  Scan partialScan = new Scan();
  partialScan.setMaxVersions();
  // Max result size of 1 ensures that each RPC request will return a single cell. The scanner
  // will need to reconstruct the results into a complete result before returning to the caller
  partialScan.setMaxResultSize(1);
  partialScan.setReversed(reversed);
  ResultScanner partialScanner = TABLE.getScanner(partialScan);

  final int startRow = reversed ? ROWS.length - 1 : 0;
  final int endRow = reversed ? -1 : ROWS.length;
  final int loopDelta = reversed ? -1 : 1;
  String message;

  for (int row = startRow; row != endRow; row = row + loopDelta) {
    message = "Ensuring the expected keyValues are present for row " + row;
    List<Cell> expectedKeyValues = createKeyValuesForRow(ROWS[row], FAMILIES, QUALIFIERS, VALUE);
    Result result = partialScanner.next();
    assertFalse(result.isPartial());
    verifyResult(result, expectedKeyValues, message);
  }

  partialScanner.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 25, Source: TestPartialResultsFromClientSide.java

Example 5: list

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
public synchronized NavigableSet<NamespaceDescriptor> list() throws IOException {
  NavigableSet<NamespaceDescriptor> ret =
      Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
  ResultScanner scanner = getNamespaceTable().getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
  try {
    for(Result r : scanner) {
      byte[] val = CellUtil.cloneValue(r.getColumnLatestCell(
        HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
        HTableDescriptor.NAMESPACE_COL_DESC_BYTES));
      ret.add(ProtobufUtil.toNamespaceDescriptor(
          HBaseProtos.NamespaceDescriptor.parseFrom(val)));
    }
  } finally {
    scanner.close();
  }
  return ret;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source: TableNamespaceManager.java

Example 6: copyTable

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
/**
 * Copy a table (the given column of every row) into a new table
 * 
 * @throws IOException
 */
public static void copyTable(String oldTableName, String newTableName, String ColumnFamily, String ColumnName) throws IOException {
	if (CreateNewTable(newTableName)) {
		logger.info("Created table " + newTableName + " successfully");
	} else {
		logger.info("Failed to create table " + newTableName);
	}
	Scan s = new Scan();
	s.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
	s.setMaxVersions(1);
	s.setCacheBlocks(false);
	ResultScanner rs = hbase_table.getScanner(s);
	
	HTableInterface hbase_table_new = conn.getTable(newTableName);
	for (Result r : rs) {
		byte[] key = r.getRow();
		byte[] value = r.getValue(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
		Put put = new Put(key);
		put.add(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName), value);
		hbase_table_new.put(put);
	}
	rs.close();
	hbase_table_new.close();
}
 
Developer ID: ItGql, Project: SparkIsax, Lines of code: 29, Source: HBaseUtils.java
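Note that the copyTable example issues one RPC per row via individual Put calls and only reaches rs.close() if nothing throws. Below is a hedged variant, not part of the original project: it assumes the same static hbase_table, conn, and logger fields as the example above (plus java.util.List/ArrayList imports), buffers the Puts into batches to cut RPC round-trips, and protects the close with finally:

// Illustrative batched variant; the name copyTableBatched is hypothetical.
public static void copyTableBatched(String newTableName, String ColumnFamily, String ColumnName) throws IOException {
	Scan s = new Scan();
	s.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
	s.setMaxVersions(1);
	s.setCacheBlocks(false);
	ResultScanner rs = hbase_table.getScanner(s);
	HTableInterface hbase_table_new = conn.getTable(newTableName);
	List<Put> buffer = new ArrayList<Put>(1000);
	try {
		for (Result r : rs) {
			Put put = new Put(r.getRow());
			put.add(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName),
					r.getValue(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName)));
			buffer.add(put);
			if (buffer.size() >= 1000) { // flush a full batch as one multi-put
				hbase_table_new.put(buffer);
				buffer.clear();
			}
		}
		if (!buffer.isEmpty()) {
			hbase_table_new.put(buffer); // flush the remainder
		}
	} finally {
		rs.close(); // release the scanner even if a put fails
		hbase_table_new.close();
	}
}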

Example 7: doAction

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
@Override
protected boolean doAction() throws Exception {
  ResultScanner rs = null;
  try {
    Scan s = new Scan();
    s.setBatch(2);
    s.addFamily(FAMILY);
    s.setFilter(new KeyOnlyFilter());
    s.setMaxVersions(1);

    rs = table.getScanner(s);
    Result result = rs.next();
    return result != null && result.size() > 0;
  } finally {
    if (rs != null) {
      rs.close();
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: IntegrationTestMTTR.java

Example 8: getScannerResults

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
@Override
public List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows)
    throws TIOError, TException {
  Table htable = getTable(table);
  List<TResult> results = null;
  ResultScanner scanner = null;
  try {
    scanner = htable.getScanner(scanFromThrift(scan));
    results = resultsFromHBase(scanner.next(numRows));
  } catch (IOException e) {
    throw getTIOError(e);
  } finally {
    if (scanner != null) {
      scanner.close();
    }
    closeTable(htable);
  }
  return results;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: ThriftHBaseServiceHandler.java
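Beyond close(), this handler relies on ResultScanner.next(int nbRows), which returns at most nbRows Results and an empty array once the scan is exhausted. That contract makes it easy to drain a scan in fixed-size batches; a minimal sketch follows (the helper name countRows and its parameters are illustrative, not from the project above):

// Illustrative helper: drain a scan in fixed-size batches, guaranteeing close().
static int countRows(Table table, Scan scan, int batchSize) throws IOException {
  int rows = 0;
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result[] batch; (batch = scanner.next(batchSize)).length > 0; ) {
      rows += batch.length; // next(n) returns an empty array when exhausted
    }
  }
  return rows;
}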

Example 9: result

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
private void result(byte[] fam, byte[] row, byte[] qual, byte[] row2, Table table, byte[] value,
    byte[] value2, byte[] row1, byte[] value1) throws IOException {
  Scan s = new Scan(row);
  // If filters are used, this attribute can be checked for explicitly in the
  // filterKV method, and KVs can be filtered out if the tag of interest is
  // not found in that KV
  s.setAttribute("visibility", Bytes.toBytes("myTag"));
  ResultScanner scanner = null;
  try {
    scanner = table.getScanner(s);
    Result next = scanner.next();

    assertTrue(Bytes.equals(next.getRow(), row));
    assertTrue(Bytes.equals(next.getValue(fam, qual), value));

    Result next2 = scanner.next();
    assertTrue(next2 != null);
    assertTrue(Bytes.equals(next2.getRow(), row1));
    assertTrue(Bytes.equals(next2.getValue(fam, qual), value1));

    next2 = scanner.next();
    assertTrue(next2 != null);
    assertTrue(Bytes.equals(next2.getRow(), row2));
    assertTrue(Bytes.equals(next2.getValue(fam, qual), value2));

  } finally {
    if (scanner != null)
      scanner.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 31, Source: TestTags.java

Example 10: testAllowPartialResults

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
/**
 * Ensure that we only see Results marked as partial when the allowPartial flag is set
 * @throws Exception
 */
@Test
public void testAllowPartialResults() throws Exception {
  Scan scan = new Scan();
  scan.setAllowPartialResults(true);
  scan.setMaxResultSize(1);
  ResultScanner scanner = TABLE.getScanner(scan);
  Result result = scanner.next();

  assertTrue(result != null);
  assertTrue(result.isPartial());
  assertTrue(result.rawCells() != null);
  assertTrue(result.rawCells().length == 1);

  scanner.close();

  scan.setAllowPartialResults(false);
  scanner = TABLE.getScanner(scan);
  result = scanner.next();

  assertTrue(result != null);
  assertTrue(!result.isPartial());
  assertTrue(result.rawCells() != null);
  assertTrue(result.rawCells().length == NUM_COLS);

  scanner.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 31, Source: TestPartialResultsFromClientSide.java

Example 11: addToEachStartKey

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
private static int addToEachStartKey(final int expected) throws IOException {
  Table t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  Table meta = new HTable(TEST_UTIL.getConfiguration(),
      TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if (!hri.getTable().equals(TABLENAME)) {
      continue;
    }

    // If start key, add 'aaa'.
    byte [] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 33, Source: TestMasterTransitions.java

Example 12: countCellsFromScanner

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
/**
 * Exhausts the scanner by calling next repeatedly. Once completely exhausted,
 * closes the scanner and returns the total cell count.
 * @param scanner the scanner to drain
 * @return the total number of cells returned by the scanner
 * @throws Exception
 */
private int countCellsFromScanner(ResultScanner scanner) throws Exception {
  Result result = null;
  int numCells = 0;
  while ((result = scanner.next()) != null) {
    numCells += result.rawCells().length;
  }

  scanner.close();
  return numCells;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source: TestPartialResultsFromClientSide.java

Example 13: setUp

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
/**
 * @throws java.lang.Exception
 */
@Before
public void setUp() throws Exception {
  // Starting and stopping replication can make us miss new logs,
  // rolling like this makes sure the most recent one gets added to the queue
  for (JVMClusterUtil.RegionServerThread r :
                        utility1.getHBaseCluster().getRegionServerThreads()) {
    utility1.getHBaseAdmin().rollWALWriter(r.getRegionServer().getServerName());
  }
  utility1.deleteTableData(tableName);
  // truncating the table will send one Delete per row to the slave cluster
  // in an async fashion, which is why we cannot just call deleteTableData on
  // utility2 since late writes could make it to the slave in some way.
  // Instead, we truncate the first table and wait for all the Deletes to
  // make it to the slave.
  Scan scan = new Scan();
  int lastCount = 0;
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for truncate");
    }
    ResultScanner scanner = htable2.getScanner(scan);
    Result[] res = scanner.next(NB_ROWS_IN_BIG_BATCH);
    scanner.close();
    if (res.length != 0) {
      if (res.length < lastCount) {
        i--; // Don't increment timeout if we make progress
      }
      lastCount = res.length;
      LOG.info("Still got " + res.length + " rows");
      Thread.sleep(SLEEP_TIME);
    } else {
      break;
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source: TestReplicationChangingPeerRegionservers.java

Example 14: testBulkLoad

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
@Test
public void testBulkLoad() throws Exception {
  TableName tableName = TableName.valueOf("testBulkLoad");
  long l = System.currentTimeMillis();
  HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  createTable(admin, tableName);
  Scan scan = createScan();
  final HTable table = init(admin, l, scan, tableName);
  // use bulkload
  final Path hfilePath = writeToHFile(l, "/temp/testBulkLoad/", "/temp/testBulkLoad/col/file",
    false);
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
  final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
  bulkload.doBulkLoad(hfilePath, table);
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  result = scanAfterBulkLoad(scanner, result, "version2");
  Put put0 = new Put(Bytes.toBytes("row1"));
  put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes
      .toBytes("version3")));
  table.put(put0);
  admin.flush(tableName);
  scanner = table.getScanner(scan);
  result = scanner.next();
  while (result != null) {
    List<KeyValue> kvs = result.getColumn(Bytes.toBytes("col"), Bytes.toBytes("q"));
    for (KeyValue _kv : kvs) {
      if (Bytes.toString(_kv.getRow()).equals("row1")) {
        System.out.println(Bytes.toString(_kv.getRow()));
        System.out.println(Bytes.toString(_kv.getQualifier()));
        System.out.println(Bytes.toString(_kv.getValue()));
        Assert.assertEquals("version3", Bytes.toString(_kv.getValue()));
      }
    }
    result = scanner.next();
  }
  scanner.close();
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 41, Source: TestScannerWithBulkload.java

Example 15: addToEachStartKey

import org.apache.hadoop.hbase.client.ResultScanner; // import the package/class the method depends on
private static int addToEachStartKey(final int expected) throws IOException {
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  HTable meta = new HTable(TEST_UTIL.getConfiguration(),
      TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if(!hri.getTable().equals(TABLENAME)) {
      continue;
    }
    // If start key, add 'aaa'.
    byte [] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 29, Source: TestZKBasedOpenCloseRegion.java


Note: The org.apache.hadoop.hbase.client.ResultScanner.close examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects, and copyright remains with their original authors; consult each project's License before distributing or reusing the code. Do not reproduce this article without permission.