

Java HBaseTestingUtility.getZooKeeperWatcher Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.getZooKeeperWatcher. If you are wondering what HBaseTestingUtility.getZooKeeperWatcher does, how to call it, or what real-world uses look like, the selected code examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.HBaseTestingUtility.


The following shows 7 code examples of the HBaseTestingUtility.getZooKeeperWatcher method, sorted by popularity by default.
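
Before the individual examples, here is a minimal sketch of the pattern most of them share: start a mini ZooKeeper cluster with HBaseTestingUtility, obtain a ZooKeeperWatcher through the static getZooKeeperWatcher method, work with znodes via ZKUtil, and close the watcher when finished. The class, method, and field names (rsZNode, baseZNode) are the ones that appear in the examples below (HBase 1.x-era API); the wrapper class ZooKeeperWatcherUsageSketch itself is hypothetical and only illustrates the call pattern under those assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

// Hypothetical scaffold showing the typical getZooKeeperWatcher usage pattern.
public class ZooKeeperWatcherUsageSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility utility = new HBaseTestingUtility();
    utility.startMiniZKCluster();  // in-process ZooKeeper only, no HBase cluster needed
    // Static helper: returns a watcher bound to the utility's ZooKeeper quorum.
    ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(utility);
    try {
      // Typical follow-up work: create or inspect znodes through ZKUtil.
      ZKUtil.createWithParents(zkw, zkw.rsZNode);
      System.out.println("base znode: " + zkw.baseZNode);
    } finally {
      zkw.close();
      utility.shutdownMiniZKCluster();
    }
  }
}

The watcher exposes the cluster's znode paths as fields (baseZNode, rsZNode, and so on), which is why the test excerpts below pass it directly to ZKUtil helpers.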

Example 1: setUp

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  // Obtain a ZooKeeperWatcher for the mini ZK cluster started in setUpBeforeClass.
  zkw = HBaseTestingUtility.getZooKeeperWatcher(utility);
  String fakeRs1 = ZKUtil.joinZNode(zkw.rsZNode, "hostname1.example.org:1234");
  try {
    ZKClusterId.setClusterId(zkw, new ClusterId());
    rp = ReplicationFactory.getReplicationPeers(zkw, conf, zkw);
    rp.init();
    rt = ReplicationFactory.getReplicationTracker(zkw, rp, conf, zkw, new DummyServer(fakeRs1));
  } catch (Exception e) {
    fail("Exception during test setup: " + e);
  }
  // Reset the listener counters and payloads tracked by each test.
  rsRemovedCount = new AtomicInteger(0);
  rsRemovedData = "";
  plChangedCount = new AtomicInteger(0);
  plChangedData = new ArrayList<String>();
  peerRemovedCount = new AtomicInteger(0);
  peerRemovedData = "";
}
 
Author: fengchen8086, Project: ditb, Lines: 20, Source: TestReplicationTrackerZKImpl.java

Example 2: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  utility = new HBaseTestingUtility();
  utility.startMiniZKCluster();
  conf = utility.getConfiguration();
  ZooKeeperWatcher zk = HBaseTestingUtility.getZooKeeperWatcher(utility);
  // Pre-create the regionservers znode so the tests can register fake region servers under it.
  ZKUtil.createWithParents(zk, zk.rsZNode);
}
 
Author: fengchen8086, Project: ditb, Lines: 9, Source: TestReplicationTrackerZKImpl.java

Example 3: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  utility = new HBaseTestingUtility();
  utility.startMiniZKCluster();
  conf = utility.getConfiguration();
  zkw = HBaseTestingUtility.getZooKeeperWatcher(utility);
  String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication");
  replicationZNode = ZKUtil.joinZNode(zkw.baseZNode, replicationZNodeName);
  // Set up ZooKeeper state for two simulated peer clusters and keep their cluster keys.
  KEY_ONE = initPeerClusterState("/hbase1");
  KEY_TWO = initPeerClusterState("/hbase2");
}
 
Author: fengchen8086, Project: ditb, Lines: 12, Source: TestReplicationStateZKImpl.java

Example 4: testRemoveStaleRecoveringRegionsDuringMasterInitialization

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test
public void testRemoveStaleRecoveringRegionsDuringMasterInitialization() throws Exception {
  // this test is for when distributed log replay is enabled
  if (!UTIL.getConfiguration().getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false)) return;
  
  LOG.info("Starting testRemoveStaleRecoveringRegionsDuringMasterInitialization");
  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  MasterFileSystem fs = master.getMasterFileSystem();

  String failedRegion = "failedRegoin1";
  String staleRegion = "staleRegion";
  ServerName inRecoveryServerName = ServerName.valueOf("mgr,1,1");
  ServerName previouselyFaildServerName = ServerName.valueOf("previous,1,1");
  String walPath = "/hbase/data/.logs/" + inRecoveryServerName.getServerName()
      + "-splitting/test";
  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, walPath),
    new SplitLogTask.Owned(inRecoveryServerName, fs.getLogRecoveryMode()).toByteArray(), 
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  String staleRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, staleRegion);
  ZKUtil.createWithParents(zkw, staleRegionPath);
  String inRecoveringRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, failedRegion);
  inRecoveringRegionPath = ZKUtil.joinZNode(inRecoveringRegionPath, 
    inRecoveryServerName.getServerName());
  ZKUtil.createWithParents(zkw, inRecoveringRegionPath);
  Set<ServerName> servers = new HashSet<ServerName>();
  servers.add(previouselyFaildServerName);
  fs.removeStaleRecoveringRegionsFromZK(servers);

  // verification
  assertFalse(ZKUtil.checkExists(zkw, staleRegionPath) != -1);
  assertTrue(ZKUtil.checkExists(zkw, inRecoveringRegionPath) != -1);
    
  ZKUtil.deleteChildrenRecursively(zkw, zkw.recoveringRegionsZNode);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.splitLogZNode);
  zkw.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 39, Source: TestMasterFileSystem.java

Example 5: testMetaRebuildOverlapFail

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test(timeout = 120000)
public void testMetaRebuildOverlapFail() throws Exception {
  // Add a new .regioninfo meta entry in hdfs
  byte[] startKey = splits[0];
  byte[] endKey = splits[2];
  createRegion(conf, htbl, startKey, endKey);

  wipeOutMeta();

  // is meta really messed up?
  assertEquals(1, scanMeta());
  assertErrors(doFsck(conf, false),
      new ERROR_CODE[] {
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
  // Note, would like to check # of tables, but this takes a while to time
  // out.

  // shutdown the minicluster
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();

  // attempt to rebuild meta table from scratch
  HBaseFsck fsck = new HBaseFsck(conf);
  assertFalse(fsck.rebuildMeta(false));

  Multimap<byte[], HbckInfo> problems = fsck.getOverlapGroups(table);
  assertEquals(1, problems.keySet().size());
  assertEquals(3, problems.size());

  // bring up the minicluster
  TEST_UTIL.startMiniZKCluster(); // tables seem enabled by default
  TEST_UTIL.restartHBaseCluster(3);

  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);

  LOG.info("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  LOG.info("No more RIT in ZK, now doing final test verification");
  int tries = 60;
  while(TEST_UTIL.getHBaseCluster()
      .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0 &&
      tries-- > 0) {
    LOG.info("Waiting for RIT: "+TEST_UTIL.getHBaseCluster()
            .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition());
    Thread.sleep(1000);
  }

  // Meta still messed up.
  assertEquals(1, scanMeta());
  HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration());
  LOG.info("Tables present after restart: " + Arrays.toString(htbls));

  // After HBASE-451 HBaseAdmin.listTables() gets table descriptors from FS,
  // so the table is still present and this should be 1.
  assertEquals(1, htbls.length);
  assertErrors(doFsck(conf, false),
      new ERROR_CODE[] {
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
}
 
Author: fengchen8086, Project: ditb, Lines: 66, Source: TestOfflineMetaRebuildOverlap.java

Example 6: testMetaRebuildHoleFail

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test(timeout = 120000)
public void testMetaRebuildHoleFail() throws Exception {
  // Fully remove a meta entry and hdfs region
  byte[] startKey = splits[1];
  byte[] endKey = splits[2];
  deleteRegion(conf, htbl, startKey, endKey);

  wipeOutMeta();

  // is meta really messed up?
  assertEquals(1, scanMeta());
  assertErrors(doFsck(conf, false), new ERROR_CODE[] {
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
  // Note, would like to check # of tables, but this takes a while to time
  // out.

  // shutdown the minicluster
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();

  // attempt to rebuild meta table from scratch
  HBaseFsck fsck = new HBaseFsck(conf);
  assertFalse(fsck.rebuildMeta(false));
  fsck.close();

  // bring up the minicluster
  TEST_UTIL.startMiniZKCluster(); // tables seem enabled by default
  TEST_UTIL.restartHBaseCluster(3);

  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);

  LOG.info("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  LOG.info("No more RIT in ZK, now doing final test verification");
  int tries = 60;
  while(TEST_UTIL.getHBaseCluster()
      .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0 &&
      tries-- > 0) {
    LOG.info("Waiting for RIT: "+TEST_UTIL.getHBaseCluster()
            .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition());
    Thread.sleep(1000);
  }

  // Meta still messed up.
  assertEquals(1, scanMeta());
  HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration());
  LOG.info("Tables present after restart: " + Arrays.toString(htbls));

  // After HBASE-451 HBaseAdmin.listTables() gets table descriptors from FS,
  // so the table is still present and this should be 1.
  assertEquals(1, htbls.length);
  assertErrors(doFsck(conf, false), new ERROR_CODE[] {
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
}
 
Author: fengchen8086, Project: ditb, Lines: 59, Source: TestOfflineMetaRebuildHole.java

Example 7: testTableExistsIfTheSpecifiedTableRegionIsSplitParent

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test(timeout = 60000)
public void testTableExistsIfTheSpecifiedTableRegionIsSplitParent() throws Exception {
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TESTING_UTIL);
  final TableName tableName =
      TableName.valueOf("testTableExistsIfTheSpecifiedTableRegionIsSplitParent");
  // Create table then get the single region for our new table.
  Table t = createTableAndWait(tableName, Bytes.toBytes("cf"));
  List<HRegion> regions = null;
  try {
    regions = cluster.getRegions(tableName);
    int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo()
      .getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
    insertData(tableName, admin, t);
    // Turn off balancer so it doesn't cut in and mess up our placements.
    admin.setBalancerRunning(false, true);
    // Turn off the meta scanner so it doesn't remove the parent on us.
    cluster.getMaster().setCatalogJanitorEnabled(false);
    boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
      tableName);
    assertEquals("The specified table should present.", true, tableExists);
    final HRegion region = findSplittableRegion(regions);
    assertTrue("not able to find a splittable region", region != null);
    SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2"));
    try {
      st.prepare();
      st.createDaughters(regionServer, regionServer, null);
    } catch (IOException e) {
      // Ignored: the test only verifies table and region state after the split attempt.
    }
    tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
      tableName);
    assertEquals("The specified table should present.", true, tableExists);
    Map<String, RegionState> rit = cluster.getMaster().getAssignmentManager().getRegionStates()
        .getRegionsInTransition();
    assertTrue(rit.size() == 3);
    cluster.getMaster().getAssignmentManager().regionOffline(st.getFirstDaughter());
    cluster.getMaster().getAssignmentManager().regionOffline(st.getSecondDaughter());
    cluster.getMaster().getAssignmentManager().regionOffline(region.getRegionInfo());
    rit = cluster.getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition();
    assertTrue(rit.size() == 0);
  }
  finally {
    admin.setBalancerRunning(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
    t.close();
    TESTING_UTIL.deleteTable(tableName);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 50, Source: TestSplitTransactionOnCluster.java


Note: The org.apache.hadoop.hbase.HBaseTestingUtility.getZooKeeperWatcher method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets come from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.