

Java ZKFailoverController Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.ha.ZKFailoverController. If you are wondering what the ZKFailoverController class does, how it is used, or where to find working examples of it, the curated code samples below may help.


The ZKFailoverController class belongs to the org.apache.hadoop.ha package. Eleven code examples of the class are shown below, sorted by popularity by default.
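Before the examples, here is a minimal sketch of the configuration pattern most of them share: pointing Hadoop's HA machinery at a ZooKeeper quorum through the public constant ZKFailoverController.ZK_QUORUM_KEY. This sketch is not taken from any of the projects below; the class name ZkQuorumConfigSketch and the quorum address localhost:2181 are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.ZKFailoverController;

public class ZkQuorumConfigSketch {
  public static void main(String[] args) {
    // Assumed quorum address, for illustration only; real deployments
    // list their own ZooKeeper host:port pairs here.
    String zkQuorum = "localhost:2181";

    Configuration conf = new Configuration();
    // ZK_QUORUM_KEY ("ha.zookeeper.quorum") tells the failover
    // controller which ZooKeeper ensemble to coordinate through.
    conf.set(ZKFailoverController.ZK_QUORUM_KEY, zkQuorum);

    System.out.println("Configured " + ZKFailoverController.ZK_QUORUM_KEY
        + " = " + conf.get(ZKFailoverController.ZK_QUORUM_KEY));
  }
}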

Example 1: main

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
public static void main(String args[])
    throws Exception {
  if (DFSUtil.parseHelpArgument(args, 
      ZKFailoverController.USAGE, System.out, true)) {
    System.exit(0);
  }
  
  GenericOptionsParser parser = new GenericOptionsParser(
      new HdfsConfiguration(), args);
  DFSZKFailoverController zkfc = DFSZKFailoverController.create(
      parser.getConfiguration());
  int retCode = 0;
  try {
    retCode = zkfc.run(parser.getRemainingArgs());
  } catch (Throwable t) {
    LOG.fatal("Got a fatal error, exiting now", t);
  }
  System.exit(retCode);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: DFSZKFailoverController.java

Example 2: main

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
public static void main(String args[])
    throws Exception {
  StringUtils.startupShutdownMessage(DFSZKFailoverController.class,
      args, LOG);
  if (DFSUtil.parseHelpArgument(args, 
      ZKFailoverController.USAGE, System.out, true)) {
    System.exit(0);
  }
  
  GenericOptionsParser parser = new GenericOptionsParser(
      new HdfsConfiguration(), args);
  DFSZKFailoverController zkfc = DFSZKFailoverController.create(
      parser.getConfiguration());
  int retCode = 0;
  try {
    retCode = zkfc.run(parser.getRemainingArgs());
  } catch (Throwable t) {
    LOG.fatal("Got a fatal error, exiting now", t);
  }
  System.exit(retCode);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 22, Source: DFSZKFailoverController.java

Example 3: buildWebCrawlerJob

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
private SparkJob buildWebCrawlerJob(String inputPath, String outputPath, String outputFaultPath,
        String outputReportPath, String contentRetrieverClassName) {
    SparkJob sparkJob = SparkJobBuilder
            .create()
            .setAppName("Spark WebCrawler")
            .setMainClass(CachedWebCrawlerJob.class)
            .addArg("-inputPath", inputPath)
            .addArg("-contentRetrieverClassName", contentRetrieverClassName)
            .addArg("-lockManagerFactoryClassName", ZookeeperLockManagerFactory.class.getName())
            .addArg("-connectionTimeout", "0")
            .addArg("-readTimeout", "0")
            .addArg("-maxPageContentLength", "0")
            .addArg("-cacheRootDir", cacheRootDir)
            .addArg("-outputPath", outputPath)
            .addArg("-outputFaultPath", outputFaultPath)
            .addArg("-outputReportPath", outputReportPath)
            .addJobProperty("spark.driver.host", "localhost")
            .addJobProperty(ZKFailoverController.ZK_QUORUM_KEY, "localhost:" + zookeeperServer.getPort())
            .build();
    
    return sparkJob;
}
 
Developer: openaire, Project: iis, Lines: 23, Source: CachedWebCrawlerJobTest.java

Example 4: main

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
public static void main(String args[])
    throws Exception {
  if (DFSUtil.parseHelpArgument(args, 
      ZKFailoverController.USAGE, System.out, true)) {
    System.exit(0);
  }
  
  GenericOptionsParser parser = new GenericOptionsParser(
      new HdfsConfiguration(), args);
  DFSZKFailoverController zkfc = DFSZKFailoverController.create(
      parser.getConfiguration());
  
  System.exit(zkfc.run(parser.getRemainingArgs()));
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 15, Source: DFSZKFailoverController.java

Example 5: main

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
public static void main(String args[])
    throws Exception {
  if (DFSUtil.parseHelpArgument(args, 
      ZKFailoverController.USAGE, System.out, true)) {
    System.exit(0);
  }
  
  GenericOptionsParser parser = new GenericOptionsParser(
      new JobConf(), args);
  MRZKFailoverController zkfc = MRZKFailoverController.create(
      parser.getConfiguration());
  
  System.exit(zkfc.run(parser.getRemainingArgs()));
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 15, Source: MRZKFailoverController.java

Example 6: setup

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
@Before
public void setup() throws Exception {
  String logicalName = MiniMRHACluster.LOGICAL_NAME;
  conf = new Configuration();
  conf.set(addKeySuffixes(ZKFailoverController.ZK_QUORUM_KEY, logicalName), hostPort);
  conf.set(HAUtil.MR_HA_FENCING_METHODS_KEY,
      AlwaysSucceedFencer.class.getName());
  conf.setBoolean(HAUtil.MR_HA_AUTO_FAILOVER_ENABLED_KEY, true);

  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  
  conf.setInt(addKeySuffixes(HAUtil.MR_HA_ZKFC_PORT_KEY, logicalName, "jt1"), 10003);
  conf.setInt(addKeySuffixes(HAUtil.MR_HA_ZKFC_PORT_KEY, logicalName, "jt2"), 10004);

  cluster = new MiniMRHACluster(conf);

  ctx = new TestContext();
  ctx.addThread(thr1 = new ZKFCThread(ctx, 0));
  assertEquals(0, thr1.zkfc.run(new String[]{"-formatZK"}));

  thr1.start();
  waitForHAState(0, HAServiceState.ACTIVE);
  
  ctx.addThread(thr2 = new ZKFCThread(ctx, 1));
  thr2.start();
  
  cluster.startTaskTracker(0, 1);
  cluster.waitActive();
  
  // Wait for the ZKFCs to fully start up
  ZKFCTestUtil.waitForHealthState(thr1.zkfc,
      HealthMonitor.State.SERVICE_HEALTHY, ctx);
  ZKFCTestUtil.waitForHealthState(thr2.zkfc,
      HealthMonitor.State.SERVICE_HEALTHY, ctx);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 40, Source: TestMRZKFailoverController.java

Example 7: testLockingWithInvalidQuorumKey

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
@Test
public void testLockingWithInvalidQuorumKey() throws Exception {
    Map<String, String> parameters = new HashMap<>();
    parameters.put(LockManagingProcess.PARAM_NODE_ID, "nodeid");
    parameters.put(LockManagingProcess.PARAM_LOCK_MODE, LockMode.obtain.name());
    conf.set(ZKFailoverController.ZK_QUORUM_KEY, "invalid");
    
    exception.expect(IllegalArgumentException.class);
    lockManager.run(portBindings, conf, parameters);
}
 
Developer: openaire, Project: iis, Lines: 11, Source: LockManagingProcessTest.java

Example 8: setup

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
@Before
public void setup() throws Exception {
  conf = new Configuration();
  // Specify the quorum per-nameservice, to ensure that these configs
  // can be nameservice-scoped.
  conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1", hostPort);
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
      AlwaysSucceedFencer.class.getName());
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);

  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  
  conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10023);
  conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10024);

  MiniDFSNNTopology topology = new MiniDFSNNTopology()
  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022)));
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();

  ctx = new TestContext();
  ctx.addThread(thr1 = new ZKFCThread(ctx, 0));
  assertEquals(0, thr1.zkfc.run(new String[]{"-formatZK"}));

  thr1.start();
  waitForHAState(0, HAServiceState.ACTIVE);
  
  ctx.addThread(thr2 = new ZKFCThread(ctx, 1));
  thr2.start();
  
  // Wait for the ZKFCs to fully start up
  ZKFCTestUtil.waitForHealthState(thr1.zkfc,
      HealthMonitor.State.SERVICE_HEALTHY, ctx);
  ZKFCTestUtil.waitForHealthState(thr2.zkfc,
      HealthMonitor.State.SERVICE_HEALTHY, ctx);
  
  fs = HATestUtil.configureFailoverFs(cluster, conf);
}
 
Developer: naver, Project: hadoop, Lines: 48, Source: TestDFSZKFailoverController.java

Example 9: setup

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
@Before
public void setup() throws Exception {
  conf = new Configuration();
  // Specify the quorum per-nameservice, to ensure that these configs
  // can be nameservice-scoped.
  conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1", hostPort);
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
      AlwaysSucceedFencer.class.getName());
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);

  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  
  conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10003);
  conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10004);

  MiniDFSNNTopology topology = new MiniDFSNNTopology()
  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();

  ctx = new TestContext();
  ctx.addThread(thr1 = new ZKFCThread(ctx, 0));
  assertEquals(0, thr1.zkfc.run(new String[]{"-formatZK"}));

  thr1.start();
  waitForHAState(0, HAServiceState.ACTIVE);
  
  ctx.addThread(thr2 = new ZKFCThread(ctx, 1));
  thr2.start();
  
  // Wait for the ZKFCs to fully start up
  ZKFCTestUtil.waitForHealthState(thr1.zkfc,
      HealthMonitor.State.SERVICE_HEALTHY, ctx);
  ZKFCTestUtil.waitForHealthState(thr2.zkfc,
      HealthMonitor.State.SERVICE_HEALTHY, ctx);
  
  fs = HATestUtil.configureFailoverFs(cluster, conf);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 48, Source: TestDFSZKFailoverController.java

Example 10: run

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
@Override
public void run(PortBindings portBindings, Configuration conf,
        Map<String, String> parameters) throws Exception {

    Preconditions.checkArgument(parameters.containsKey(PARAM_NODE_ID), "node id not provided!");
    Preconditions.checkArgument(parameters.containsKey(PARAM_LOCK_MODE), "lock mode not provided!");

    String zkConnectionString = conf.get(ZKFailoverController.ZK_QUORUM_KEY);
    Preconditions.checkArgument(StringUtils.isNotBlank(zkConnectionString),
            "zookeeper quorum is unknown, invalid '%s' property value: %s", ZKFailoverController.ZK_QUORUM_KEY, zkConnectionString);

    int sessionTimeout = parameters.containsKey(PARAM_ZK_SESSION_TIMEOUT) ?
            Integer.valueOf(parameters.get(PARAM_ZK_SESSION_TIMEOUT)) : DEFAULT_SESSION_TIMEOUT;

    final ZooKeeper zooKeeper = new ZooKeeper(zkConnectionString, sessionTimeout, (e) -> {
        // we are not interested in generic events
    });

    // initialize the root node if it does not exist
    if (zooKeeper.exists(DEFAULT_ROOT_NODE, false) == null) {
        log.info("initializing root node: " + DEFAULT_ROOT_NODE);
        zooKeeper.create(DEFAULT_ROOT_NODE, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        log.info("root node initialized");
    }

    final String nodePath = LockManagingProcessUtils.generatePath(parameters.get(PARAM_NODE_ID), DEFAULT_ROOT_NODE);

    final Semaphore semaphore = new Semaphore(1);
    semaphore.acquire();

    switch (LockMode.valueOf(parameters.get(PARAM_LOCK_MODE))) {
        case obtain: {
            LockManagingProcessUtils.obtain(zooKeeper, nodePath, semaphore);
            break;
        }
        case release: {
            LockManagingProcessUtils.release(zooKeeper, nodePath);
            break;
        }
        default: {
            throw new InvalidParameterException("unsupported lock mode: " + parameters.get(PARAM_LOCK_MODE));
        }
    }
}
 
Developer: openaire, Project: iis, Lines: 45, Source: LockManagingProcess.java

Example 11: initialize

import org.apache.hadoop.ha.ZKFailoverController; // import the required package/class
@Before
public void initialize() throws Exception {
    zookeeperServer = new TestingServer(true);
    conf.clear();
    conf.set(ZKFailoverController.ZK_QUORUM_KEY, "localhost:" + zookeeperServer.getPort());
}
 
Developer: openaire, Project: iis, Lines: 7, Source: LockManagingProcessTest.java


Note: The org.apache.hadoop.ha.ZKFailoverController class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use must follow the license of the corresponding project. Do not republish without permission.