

Java HTable.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HTable.close. If you are wondering what HTable.close does, how to use it, or what real-world calls look like, the curated code samples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.client.HTable.


Below are 15 code examples of HTable.close, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code samples.
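Before diving into the examples, a quick orientation: HTable implements java.io.Closeable, and close() flushes any buffered writes and releases client-side resources, which is why the examples below call it in a finally block or at the end of the method. The following is a minimal, self-contained sketch against the same legacy HTable API the examples use (it assumes an old-style HBase client on the classpath and a reachable cluster; the table name "demo_table" and the family/qualifier names are hypothetical placeholders):

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableCloseSketch {
  public static void main(String[] args) throws IOException {
    // "demo_table", family "f", and qualifier "q" are placeholder names.
    HTable table = new HTable(HBaseConfiguration.create(), "demo_table");
    try {
      Put p = new Put(Bytes.toBytes("row1"));
      p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(p);
    } finally {
      // close() flushes buffered mutations and frees client resources;
      // putting it in finally guarantees cleanup even if put() throws.
      table.close();
    }
  }
}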

Example 1: testMultiRowRangeFilterWithEmptyStartRow

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Test
public void testMultiRowRangeFilterWithEmptyStartRow() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithEmptyStartRow");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();

  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(10), false));
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));

  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  List<Cell> results1 = getScanResult(Bytes.toBytes(""), Bytes.toBytes(10), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), ht);
  assertEquals(results1.size() + results2.size(), resultsSize);

  ht.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestMultiRowRangeFilter.java

Example 2: createTable

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
/**
 * Create a table with the specified name and number of regions.
 * @param tableName the table to create
 * @param regionNum the number of regions the table should span
 * @throws IOException if table creation fails
 */
private static void createTable(TableName tableName, int regionNum)
    throws IOException {
  int expectedRegions = regionNum;
  byte[][] splitKeys = new byte[expectedRegions - 1][];
  for (int i = 1; i < expectedRegions; i++) {
    byte splitKey = (byte) i;
    splitKeys[i - 1] = new byte[] { splitKey, splitKey, splitKey };
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  admin.createTable(desc, splitKeys);

  HTable ht = (HTable) CONNECTION.getTable(tableName);
  @SuppressWarnings("deprecation")
  Map<HRegionInfo, ServerName> regions = ht.getRegionLocations();
  assertEquals("Tried to create " + expectedRegions + " regions "
      + "but only found " + regions.size(), expectedRegions, regions.size());
  ht.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TestRegionPlacement.java

Example 3: countHBaseTable

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
protected int countHBaseTable(String tableName, String colFamily)
    throws IOException {
  int count = 0;
  HTable table = new HTable(new Configuration(
      hbaseTestUtil.getConfiguration()), Bytes.toBytes(tableName));
  try {
    ResultScanner scanner = table.getScanner(Bytes.toBytes(colFamily));
    for(Result result = scanner.next();
        result != null;
        result = scanner.next()) {
      count++;
    }
  } finally {
    table.close();
  }
  return count;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 18, Source: HBaseTestCase.java

Example 4: addToEachStartKey

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
private static int addToEachStartKey(final int expected) throws IOException {
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  HTable meta = new HTable(TEST_UTIL.getConfiguration(),
      TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if(!hri.getTable().equals(TABLENAME)) {
      continue;
    }
    // If start key, add 'aaa'.
    byte [] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: TestZKBasedOpenCloseRegion.java

Example 5: init

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
private void init() {
  logger.debug("Getting region locations");
  try {
    HTable table = new HTable(storagePluginConfig.getHBaseConf(), hbaseScanSpec.getTableName());
    this.hTableDesc = table.getTableDescriptor();
    NavigableMap<HRegionInfo, ServerName> regionsMap = table.getRegionLocations();
    statsCalculator = new TableStatsCalculator(table, hbaseScanSpec, storagePlugin.getContext().getConfig(), storagePluginConfig);

    boolean foundStartRegion = false;
    regionsToScan = new TreeMap<HRegionInfo, ServerName>();
    for (Entry<HRegionInfo, ServerName> mapEntry : regionsMap.entrySet()) {
      HRegionInfo regionInfo = mapEntry.getKey();
      if (!foundStartRegion && hbaseScanSpec.getStartRow() != null && hbaseScanSpec.getStartRow().length != 0 && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
        continue;
      }
      foundStartRegion = true;
      regionsToScan.put(regionInfo, mapEntry.getValue());
      scanSizeInBytes += statsCalculator.getRegionSizeInBytes(regionInfo.getRegionName());
      if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0 && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
        break;
      }
    }

    table.close();
  } catch (IOException e) {
    throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
  }
  verifyColumns();
}
 
Developer: skhalifa, Project: QDrill, Lines: 30, Source: HBaseGroupScan.java

Example 6: generateHBaseDatasetCompositeKeyDate

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetCompositeKeyDate(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  Date startDate = new Date(1408924800000L);
  long startTime  = startDate.getTime();
  long MILLISECONDS_IN_A_DAY  = (long)1000 * 60 * 60 * 24;
  long MILLISECONDS_IN_A_YEAR = MILLISECONDS_IN_A_DAY * 365;
  long endTime    = startTime + MILLISECONDS_IN_A_YEAR;
  long interval   = MILLISECONDS_IN_A_DAY / 3;

  for (long ts = startTime, counter = 0; ts < endTime; ts += interval, counter ++) {
  byte[] rowKey = ByteBuffer.allocate(16).putLong(ts).array();

    for(int i = 0; i < 8; ++i) {
      rowKey[8 + i] = (byte)(counter >> (56 - (i * 8)));
    }

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();
}
 
Developer: skhalifa, Project: QDrill, Lines: 40, Source: TestTableGenerator.java

Example 7: generateHBaseDatasetCompositeKeyInt

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetCompositeKeyInt(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  int startVal = 0;
  int stopVal = 1000;
  int interval = 47;
  long counter = 0;
  for (int i = startVal; i < stopVal; i += interval, counter ++) {
    byte[] rowKey = ByteBuffer.allocate(12).putInt(i).array();

    for(int j = 0; j < 8; ++j) {
      rowKey[4 + j] = (byte)(counter >> (56 - (j * 8)));
    }

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();
}
 
Developer: skhalifa, Project: QDrill, Lines: 37, Source: TestTableGenerator.java

Example 8: generateHBaseDatasetBigIntOB

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetBigIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  long startTime = (long)1438034423 * 1000;
  for (long i = startTime; i <= startTime + 100; i ++) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Developer: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 9: generateHBaseDatasetIntOB

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (int i = -49; i <= 100; i ++) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Developer: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 10: scanMeta

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
/**
 * Dumps hbase:meta table info
 *
 * @return # of entries in meta.
 */
protected int scanMeta() throws IOException {
  int count = 0;
  HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
  ResultScanner scanner = meta.getScanner(new Scan());
  LOG.info("Table: " + Bytes.toString(meta.getTableName()));
  for (Result res : scanner) {
    LOG.info(Bytes.toString(res.getRow()));
    count++;
  }
  meta.close();
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: OfflineMetaRebuildTestCore.java

Example 11: insertData

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public void insertData() throws IOException, InterruptedException {
  HTable table = new HTable(conf, tableName);
  DateFormat dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
  int counter = 0;
  long start = 0;
  while (true) {
    if (queue.isEmpty()) {
      if (threadFinishMark[id]) {
        break;
      } else {
        Thread.sleep(SLEEP_INTERVAL);
        continue;
      }
    }
    if (CAL_LATENCY) {
      start = System.currentTimeMillis();
    }
    insertOneRecord(queue.poll());
    if (CAL_LATENCY) {
      updateLatency(System.currentTimeMillis() - start);
    }
    if (counter == PRINT_INTERVAL) {
      counter = 0;
      printAndAddtoReportQueue(
          "coffey thread " + id + " insert data " + doneSize + " class: " + this.getClass()
              .getName() + ", time: " + dateFormat.format(new Date()));
    }
    ++counter;
    ++doneSize;
  }
  table.close();
  printAndAddtoReportQueue("coffey totally insert " + doneSize + " records");
  synchronized (syncBoxObj) {
    totalDoneSize += doneSize;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 37, Source: PerfInserterBase.java

Example 12: testMultiRowRangeWithFilterListAndOperator

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Test
public void testMultiRowRangeWithFilterListAndOperator() throws IOException {
  tableName = Bytes.toBytes("TestMultiRowRangeFilterWithFilterListAndOperator");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);

  Scan scan = new Scan();
  scan.setMaxVersions();

  List<RowRange> ranges1 = new ArrayList<RowRange>();
  ranges1.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges1.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  ranges1.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false));

  MultiRowRangeFilter filter1 = new MultiRowRangeFilter(ranges1);

  List<RowRange> ranges2 = new ArrayList<RowRange>();
  ranges2.add(new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(40), false));
  ranges2.add(new RowRange(Bytes.toBytes(80), true, Bytes.toBytes(90), false));

  MultiRowRangeFilter filter2 = new MultiRowRangeFilter(ranges2);

  FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
  filterList.addFilter(filter1);
  filterList.addFilter(filter2);
  scan.setFilter(filterList);
  int resultsSize = getResultsSize(ht, scan);
  LOG.info("found " + resultsSize + " results");
  List<Cell> results1 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), ht);

  assertEquals(results1.size(), resultsSize);

  ht.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 35, Source: TestMultiRowRangeFilter.java

Example 13: testLegacyRecovery

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Ignore // TODO: HBASE-13391 to fix flaky test
@Test (timeout=300000)
public void testLegacyRecovery() throws Exception {
  LOG.info(TestRegionObserverInterface.class.getName() +".testLegacyRecovery");
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testLegacyRecovery");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
    ServerName sn2 = rs1.getRegionServer().getServerName();
    String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

    util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
    while (!sn2.equals(table.getRegionLocations().firstEntry().getValue() )){
      Thread.sleep(100);
    }

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);
    table.put(put);

    verifyMethodResult(SimpleRegionObserver.Legacy.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
      "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"},
      tableName,
      new Boolean[] {false, false, true, true, true, true, false}
        );

    verifyMethodResult(SimpleRegionObserver.Legacy.class,
        new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
            "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
        tableName,
        new Integer[] {0, 0, 1, 1, 0, 0});

    cluster.killRegionServer(rs1.getRegionServer().getServerName());
    Threads.sleep(1000); // Let the kill soak in.
    util.waitUntilAllRegionsAssigned(tableName);
    LOG.info("All regions assigned");

    verifyMethodResult(SimpleRegionObserver.Legacy.class,
        new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
            "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
        tableName,
        new Integer[]{1, 1, 0, 0, 1, 1});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 51, Source: TestRegionObserverInterface.java

Example 14: generateHBaseDatasetCompositeKeyTime

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetCompositeKeyTime(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  long startTime  = 0;
  long MILLISECONDS_IN_A_SEC  = (long)1000;
  long MILLISECONDS_IN_A_DAY = MILLISECONDS_IN_A_SEC * 60 * 60 * 24;
  long endTime    = startTime + MILLISECONDS_IN_A_DAY;
  long smallInterval   = 25;
  long largeInterval   = MILLISECONDS_IN_A_SEC * 42;
  long interval        = smallInterval;

  for (long ts = startTime, counter = 0; ts < endTime; ts += interval, counter ++) {
    byte[] rowKey = ByteBuffer.allocate(16).putLong(ts).array();

    for(int i = 0; i < 8; ++i) {
      rowKey[8 + i] = (byte)(counter >> (56 - (i * 8)));
    }

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);

    if (interval == smallInterval) {
      interval = largeInterval;
    } else {
      interval = smallInterval;
    }
  }

  table.flushCommits();
  table.close();
}
 
Developer: skhalifa, Project: QDrill, Lines: 47, Source: TestTableGenerator.java

Example 15: testExistingZnodeBlocksSplitAndWeRollback

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Test (timeout = 300000) public void testExistingZnodeBlocksSplitAndWeRollback()
throws IOException, InterruptedException, NodeExistsException, KeeperException, ServiceException {
  final TableName tableName =
      TableName.valueOf("testExistingZnodeBlocksSplitAndWeRollback");

  // Create table then get the single region for our new table.
  HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
  List<HRegion> regions = cluster.getRegions(tableName);
  HRegionInfo hri = getAndCheckSingleTableRegion(regions);

  int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);

  RegionStates regionStates = cluster.getMaster().getAssignmentManager().getRegionStates();

  // Turn off balancer so it doesn't cut in and mess up our placements.
  this.admin.setBalancerRunning(false, true);
  // Turn off the meta scanner so it doesn't remove the parent on us.
  cluster.getMaster().setCatalogJanitorEnabled(false);
  try {
    // Add a bit of load up into the table so splittable.
    TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY, false);
    // Get region pre-split.
    HRegionServer server = cluster.getRegionServer(tableRegionIndex);
    printOutRegions(server, "Initial regions: ");
    int regionCount = ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size();
    // Insert into zk a blocking znode, a znode of same name as region
    // so it gets in way of our splitting.
    ServerName fakedServer = ServerName.valueOf("any.old.server", 1234, -1);
    if (useZKForAssignment) {
      ZKAssign.createNodeClosing(TESTING_UTIL.getZooKeeperWatcher(),
        hri, fakedServer);
    } else {
      regionStates.updateRegionState(hri, RegionState.State.CLOSING);
    }
    // Now try splitting.... should fail.  And each should successfully
    // rollback.
    this.admin.split(hri.getRegionNameAsString());
    this.admin.split(hri.getRegionNameAsString());
    this.admin.split(hri.getRegionNameAsString());
    // Wait around a while and assert count of regions remains constant.
    for (int i = 0; i < 10; i++) {
      Thread.sleep(100);
      assertEquals(regionCount, ProtobufUtil.getOnlineRegions(
        server.getRSRpcServices()).size());
    }
    if (useZKForAssignment) {
      // Now clear the zknode
      ZKAssign.deleteClosingNode(TESTING_UTIL.getZooKeeperWatcher(),
        hri, fakedServer);
    } else {
      regionStates.regionOnline(hri, server.getServerName());
    }
    // Now try splitting and it should work.
    split(hri, server, regionCount);
    // Get daughters
    checkAndGetDaughters(tableName);
    // OK, so split happened after we cleared the blocking node.
  } finally {
    admin.setBalancerRunning(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
    t.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 64, Source: TestSplitTransactionOnCluster.java


Note: The org.apache.hadoop.hbase.client.HTable.close method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.