

Java ZooKeeperWatcher.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.close. If you are looking for concrete examples of how to call ZooKeeperWatcher.close, the curated snippets below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.


The sections below present 11 code examples of the ZooKeeperWatcher.close method, sorted by popularity by default.
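Before the examples, here is a minimal sketch of the pattern they all share: construct a ZooKeeperWatcher, do some work with it, and call close() in a finally block so the underlying ZooKeeper session is released even if an error occurs. This is an illustrative sketch only; it assumes an HBase 1.x classpath, a reachable ZooKeeper quorum from the configuration returned by the standard HBaseConfiguration.create(), and the class name ZooKeeperWatcherCloseSketch and the "close-example" identifier are made up for this illustration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public class ZooKeeperWatcherCloseSketch {
  public static void main(String[] args) throws IOException, KeeperException {
    Configuration conf = HBaseConfiguration.create();
    // A null Abortable mirrors several of the examples below; production code
    // usually passes a real Abortable so fatal ZooKeeper errors can abort the caller.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "close-example", null);
    try {
      // Any work with the watcher goes here; checkExists is used as a
      // harmless read-only call against the base znode.
      int version = ZKUtil.checkExists(zkw, zkw.baseZNode);
      System.out.println("baseZNode present: " + (version != -1));
    } finally {
      // Always close the watcher to release the ZooKeeper session.
      zkw.close();
    }
  }
}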

Example 1: tearDown

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
@After
public void tearDown()
throws KeeperException, ZooKeeperConnectionException, IOException {
  // Make sure zk is clean before we run the next test.
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(TESTUTIL.getConfiguration(),
      "@Before", new Abortable() {
    @Override
    public void abort(String why, Throwable e) {
      throw new RuntimeException(why, e);
    }

    @Override
    public boolean isAborted() {
      return false;
    }
  });
  ZKUtil.deleteNodeRecursively(zkw, zkw.baseZNode);
  zkw.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestMasterNoCluster.java

Example 2: getAuthToken

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
/**
 * Get the authentication token of the user for the cluster specified in the configuration
 * @return null if the user does not have the token, otherwise the auth token for the cluster.
 */
private static Token<AuthenticationTokenIdentifier> getAuthToken(Configuration conf, User user)
    throws IOException, InterruptedException {
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "TokenUtil-getAuthToken", null);
  try {
    String clusterId = ZKClusterId.readClusterIdZNode(zkw);
    if (clusterId == null) {
      throw new IOException("Failed to get cluster ID");
    }
    return new AuthenticationTokenSelector().selectToken(new Text(clusterId), user.getTokens());
  } catch (KeeperException e) {
    throw new IOException(e);
  } finally {
    zkw.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TokenUtil.java

Example 3: getMetaRegionServerName

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
private ServerName getMetaRegionServerName(int replicaId)
throws IOException, KeeperException {
  ZooKeeperWatcher zkw = createZooKeeperWatcher();
  ServerName sn = null;
  try {
    sn = new MetaTableLocator().getMetaRegionLocation(zkw, replicaId);
  } finally {
    zkw.close();
  }
  return sn;
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: HBaseFsck.java

Example 4: checkAndFixTableLocks

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
private void checkAndFixTableLocks() throws IOException {
  ZooKeeperWatcher zkw = createZooKeeperWatcher();

  try {
    TableLockChecker checker = new TableLockChecker(zkw, errors);
    checker.checkTableLocks();

    if (this.fixTableLocks) {
      checker.fixExpiredTableLocks();
    }
  } finally {
    zkw.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: HBaseFsck.java

Example 5: unassignMetaReplica

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
private void unassignMetaReplica(HbckInfo hi) throws IOException, InterruptedException,
KeeperException {
  undeployRegions(hi);
  ZooKeeperWatcher zkw = createZooKeeperWatcher();
  try {
    ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(hi.metaEntry.getReplicaId()));
  } finally {
    zkw.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: HBaseFsck.java

Example 6: testMultipleClients

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
  //tests lock usage from multiple zookeeper clients with different sessions.
  //acquire one read lock, then one write lock
  final String testName = "testMultipleClients";

  //different zookeeper sessions with separate identifiers
  ZooKeeperWatcher zkWatcher1 = new ZooKeeperWatcher(conf, "testMultipleClients-1", null);
  ZooKeeperWatcher zkWatcher2 = new ZooKeeperWatcher(conf, "testMultipleClients-2", null);

  String znode = ZKUtil.joinZNode(zkWatcher1.tableLockZNode, testName);

  ZKInterProcessReadWriteLock clientLock1
    = new ZKInterProcessReadWriteLock(zkWatcher1, znode, null);
  ZKInterProcessReadWriteLock clientLock2
    = new ZKInterProcessReadWriteLock(zkWatcher2, znode, null);

  InterProcessLock lock1 = clientLock1.readLock(Bytes.toBytes("client1"));
  lock1.acquire();

  //try to acquire, but it will timeout. We are testing whether this will cause any problems
  //due to the read lock being from another client
  InterProcessLock lock2 = clientLock2.writeLock(Bytes.toBytes("client2"));
  assertFalse(lock2.tryAcquire(1000));

  lock1.release();

  //this time it will acquire
  assertTrue(lock2.tryAcquire(5000));
  lock2.release();
  zkWatcher1.close();
  zkWatcher2.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: TestZKInterProcessReadWriteLock.java

Example 7: testRemoveStaleRecoveringRegionsDuringMasterInitialization

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
@Test
public void testRemoveStaleRecoveringRegionsDuringMasterInitialization() throws Exception {
  // this test is for when distributed log replay is enabled
  if (!UTIL.getConfiguration().getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false)) return;
  
  LOG.info("Starting testRemoveStaleRecoveringRegionsDuringMasterInitialization");
  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  MasterFileSystem fs = master.getMasterFileSystem();

  String failedRegion = "failedRegoin1";
  String staleRegion = "staleRegion";
  ServerName inRecoveryServerName = ServerName.valueOf("mgr,1,1");
  ServerName previouselyFaildServerName = ServerName.valueOf("previous,1,1");
  String walPath = "/hbase/data/.logs/" + inRecoveryServerName.getServerName()
      + "-splitting/test";
  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, walPath),
    new SplitLogTask.Owned(inRecoveryServerName, fs.getLogRecoveryMode()).toByteArray(), 
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  String staleRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, staleRegion);
  ZKUtil.createWithParents(zkw, staleRegionPath);
  String inRecoveringRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, failedRegion);
  inRecoveringRegionPath = ZKUtil.joinZNode(inRecoveringRegionPath, 
    inRecoveryServerName.getServerName());
  ZKUtil.createWithParents(zkw, inRecoveringRegionPath);
  Set<ServerName> servers = new HashSet<ServerName>();
  servers.add(previouselyFaildServerName);
  fs.removeStaleRecoveringRegionsFromZK(servers);

  // verification
  assertFalse(ZKUtil.checkExists(zkw, staleRegionPath) != -1);
  assertTrue(ZKUtil.checkExists(zkw, inRecoveringRegionPath) != -1);
    
  ZKUtil.deleteChildrenRecursively(zkw, zkw.recoveringRegionsZNode);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.splitLogZNode);
  zkw.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestMasterFileSystem.java

Example 8: testRegionServerHostname

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
@Test(timeout=120000)
public void testRegionServerHostname() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 1;
  Enumeration<NetworkInterface> netInterfaceList = NetworkInterface.getNetworkInterfaces();

  while (netInterfaceList.hasMoreElements()) {
    NetworkInterface ni = netInterfaceList.nextElement();
    Enumeration<InetAddress> addrList = ni.getInetAddresses();
    // iterate through host addresses and use each as hostname
    while (addrList.hasMoreElements()) {
      InetAddress addr = addrList.nextElement();
      if (addr.isLoopbackAddress() || addr.isLinkLocalAddress() || addr.isMulticastAddress()) {
        continue;
      }
      String hostName = addr.getHostName();
      LOG.info("Found " + hostName + " on " + ni);
      
      TEST_UTIL.getConfiguration().set(HRegionServer.RS_HOSTNAME_KEY, hostName);
      TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
      try {
        ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
        List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode);
        while (servers == null) {
          Threads.sleep(10);
        }
        assertTrue(servers.size() == NUM_RS);
        for (String server : servers) {
          assertTrue("From zookeeper: " + server + " hostname: " + hostName,
            server.startsWith(hostName.toLowerCase()+","));
        }
        zkw.close();
      } finally {
        TEST_UTIL.shutdownMiniCluster();
      }
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestRegionServerHostname.java

Example 9: getTableRegions

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
/**
 * get the regions of a given table.
 *
 * @param tableName the name of the table
 * @return Ordered list of {@link HRegionInfo}.
 * @throws IOException
 */
@Override
public List<HRegionInfo> getTableRegions(final TableName tableName)
throws IOException {
  ZooKeeperWatcher zookeeper =
    new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
      new ThrowableAbortable());
  List<HRegionInfo> Regions = null;
  try {
    Regions = MetaTableAccessor.getTableRegions(zookeeper, connection, tableName, true);
  } finally {
    zookeeper.close();
  }
  return Regions;
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: HBaseAdmin.java

Example 10: testShouldCheckMasterFailOverWhenMETAIsInOpenedState

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
@Test (timeout=180000)
public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
    throws Exception {
  LOG.info("Starting testShouldCheckMasterFailOverWhenMETAIsInOpenedState");
  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;

  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.master.info.port", -1);
  conf.setBoolean("hbase.assignment.usezk", true);

  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();

  // Find regionserver carrying meta.
  List<RegionServerThread> regionServerThreads =
    cluster.getRegionServerThreads();
  Region metaRegion = null;
  HRegionServer metaRegionServer = null;
  for (RegionServerThread regionServerThread : regionServerThreads) {
    HRegionServer regionServer = regionServerThread.getRegionServer();
    metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
    regionServer.abort("");
    if (null != metaRegion) {
      metaRegionServer = regionServer;
      break;
    }
  }

  TEST_UTIL.shutdownMiniHBaseCluster();

  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw =
    HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
        metaRegion, metaRegionServer.getServerName());

  LOG.info("Staring cluster for second time");
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);

  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  while (!master.isInitialized()) {
    Thread.sleep(100);
  }
  // Failover should be completed, now wait for no RIT
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);

  zkw.close();
  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestMasterFailover.java

Example 11: testMasterRestartAtRegionSplitPendingCatalogJanitor

import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; // import the class this method depends on
/**
 * Verifies HBASE-5806.  Here the case is that splitting is completed but before the
 * CJ could remove the parent region the master is killed and restarted.
 * @throws IOException
 * @throws InterruptedException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test (timeout = 300000)
public void testMasterRestartAtRegionSplitPendingCatalogJanitor()
    throws IOException, InterruptedException, NodeExistsException,
    KeeperException, ServiceException {
  final TableName tableName = TableName
      .valueOf("testMasterRestartAtRegionSplitPendingCatalogJanitor");

  // Create table then get the single region for our new table.
  HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
  List<HRegion> regions = cluster.getRegions(tableName);
  HRegionInfo hri = getAndCheckSingleTableRegion(regions);

  int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);

  // Turn off balancer so it doesn't cut in and mess up our placements.
  this.admin.setBalancerRunning(false, true);
  // Turn off the meta scanner so it don't remove parent on us.
  cluster.getMaster().setCatalogJanitorEnabled(false);
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(t.getConfiguration(),
    "testMasterRestartAtRegionSplitPendingCatalogJanitor", new UselessTestAbortable());
  try {
    // Add a bit of load up into the table so splittable.
    TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY, false);
    // Get region pre-split.
    HRegionServer server = cluster.getRegionServer(tableRegionIndex);
    printOutRegions(server, "Initial regions: ");

    this.admin.split(hri.getRegionNameAsString());
    checkAndGetDaughters(tableName);
    // Assert the ephemeral node is up in zk.
    String path = ZKAssign.getNodeName(zkw, hri.getEncodedName());
    Stat stats = zkw.getRecoverableZooKeeper().exists(path, false);
    LOG.info("EPHEMERAL NODE BEFORE SERVER ABORT, path=" + path + ", stats="
        + stats);
    String node = ZKAssign.getNodeName(zkw, hri.getEncodedName());
    Stat stat = new Stat();
    byte[] data = ZKUtil.getDataNoWatch(zkw, node, stat);
    // ZKUtil.create
    for (int i=0; data != null && i<60; i++) {
      Thread.sleep(1000);
      data = ZKUtil.getDataNoWatch(zkw, node, stat);
    }
    assertNull("Waited too long for ZK node to be removed: "+node, data);

    MockMasterWithoutCatalogJanitor master = abortAndWaitForMaster();

    this.admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());

    // Update the region to be offline and split, so that HRegionInfo#equals
    // returns true in checking rebuilt region states map.
    hri.setOffline(true);
    hri.setSplit(true);
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    assertTrue("Split parent should be in SPLIT state",
      regionStates.isRegionInState(hri, State.SPLIT));
    ServerName regionServerOfRegion = regionStates.getRegionServerOfRegion(hri);
    assertTrue(regionServerOfRegion == null);
  } finally {
    this.admin.setBalancerRunning(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
    t.close();
    zkw.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 73, Source: TestSplitTransactionOnCluster.java


Note: The org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.close examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.