本文整理汇总了Java中org.apache.hadoop.yarn.server.MiniYARNCluster.init方法的典型用法代码示例。如果您正苦于以下问题:Java MiniYARNCluster.init方法的具体用法?Java MiniYARNCluster.init怎么用?Java MiniYARNCluster.init使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.yarn.server.MiniYARNCluster
的用法示例。
在下文中一共展示了MiniYARNCluster.init方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: setupClass
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
/**
 * Brings up the shared test fixture: a single-node YARN mini-cluster plus
 * the default collaborator objects, all configured from the same conf.
 *
 * @throws IOException if there's an error accessing the local filesystem
 * @throws SQLException if there's an error querying the embedded DB
 */
@BeforeClass
public static void setupClass() throws IOException, SQLException {
    // Small minimum allocation + FIFO scheduler keeps the cluster lightweight.
    conf = new YarnConfiguration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);

    miniCluster = new MiniYARNCluster("test", 1, 1, 1);
    miniCluster.init(conf);
    miniCluster.start();

    // Default collaborators; both pick up the cluster configuration.
    destinationObjectFactory = new DestinationObjectFactory();
    destinationObjectFactory.setConf(conf);
    conflictHandler = new ObjectConflictHandler();
    conflictHandler.setConf(conf);
}
示例2: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
@BeforeClass
public static void setup() throws Exception {
    // Mini-cluster with a fast NM heartbeat so the tests run quickly.
    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
    yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();

    // Client handle for talking to the RM.
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Cache topology info and the request parameters the tests reuse.
    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    node = nodeReports.get(0).getNodeId().getHost();
    rack = nodeReports.get(0).getRackName();
    nodes = new String[] { node };
    racks = new String[] { rack };
    priority = Priority.newInstance(1);
    priority2 = Priority.newInstance(2);
    capability = Resource.newInstance(1024, 1);
}
示例3: setupMiniYARNCluster
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
/**
 * Starts a two-NM mini-cluster running the capacity scheduler with the
 * reservation system enabled, and waits until the reservation plan has
 * accumulated more than 6000 MB of capacity before returning.
 */
private MiniYARNCluster setupMiniYARNCluster() throws Exception {
    CapacitySchedulerConfiguration schedulerConf = new CapacitySchedulerConfiguration();
    ReservationSystemTestUtil.setupQueueConfiguration(schedulerConf);
    schedulerConf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
        ResourceScheduler.class);
    schedulerConf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);

    final MiniYARNCluster cluster = new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
    cluster.init(schedulerConf);
    cluster.start();

    // Poll every 10 ms (up to 10 s) until the plan's total capacity is usable.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            return cluster.getResourceManager().getRMContext()
                .getReservationSystem()
                .getPlan(ReservationSystemTestUtil.reservationQ)
                .getTotalCapacity().getMemorySize() > 6000;
        }
    }, 10, 10000);
    return cluster;
}
示例4: testRMWebAppRedirect
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
@Test
public void testRMWebAppRedirect() throws YarnException,
    InterruptedException, IOException {
    // Two RMs, zero NMs; automatic failover disabled so we pick the active RM.
    cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 0, 1, 1);
    conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
    cluster.init(conf);
    cluster.start();
    getAdminService(0).transitionToActive(req);

    final String activeRmUrl = "http://0.0.0.0:18088";
    final String standbyRmUrl = "http://0.0.0.0:28088";

    // Standby RM root page should issue a Refresh redirect to the active RM.
    String header = getHeader("Refresh", standbyRmUrl);
    assertTrue(header.contains("; url=" + activeRmUrl));
    // These standby pages are served locally — no redirect expected.
    header = getHeader("Refresh", standbyRmUrl + "/cluster/cluster");
    assertEquals(null, header);
    header = getHeader("Refresh", standbyRmUrl + "/ws/v1/cluster/info");
    assertEquals(null, header);
    // The REST apps endpoint does redirect to the active RM.
    header = getHeader("Refresh", standbyRmUrl + "/ws/v1/cluster/apps");
    assertTrue(header.contains("; url=" + activeRmUrl));
    // Due to the limitation of MiniYARNCluster and dispatcher is a singleton,
    // we couldn't add the test case after explicitFailover();
}
示例5: start
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
public void start() throws IOException {
    log.info("Checking if cluster=" + clusterName + " needs to be started");
    synchronized (this.startupShutdownMonitor) {
        if (started) {
            // Idempotent: a second start() is a no-op.
            return;
        }
        log.info("Starting cluster=" + clusterName);

        configuration = new YarnConfiguration();
        // Literal key used because YarnConfiguration.IS_MINI_YARN_CLUSTER
        // is not available in this classpath.
        configuration.setBoolean("yarn.is.minicluster", true);
        configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/" + clusterName + "-dfs");

        // DFS first, then YARN, both sized by the configured node count.
        dfsCluster = new MiniDFSCluster.Builder(configuration).numDataNodes(nodes).build();
        yarnCluster = new MiniYARNCluster(clusterName, nodes, 1, 1);
        yarnCluster.init(configuration);
        yarnCluster.start();

        log.info("Started cluster=" + clusterName);
        started = true;
    }
}
示例6: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
@BeforeClass
public static void setup() throws Exception {
    // Fast heartbeats, rolling AMRM token key, short AM expiry and quick
    // log retention keep the mini-cluster tests responsive.
    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
    conf.setLong(
        YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
        rolling_interval_sec);

    yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();

    // RM client used by the tests.
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Cache node/rack info and common request parameters.
    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    node = nodeReports.get(0).getNodeId().getHost();
    rack = nodeReports.get(0).getRackName();
    nodes = new String[] { node };
    racks = new String[] { rack };
    priority = Priority.newInstance(1);
    priority2 = Priority.newInstance(2);
    capability = Resource.newInstance(1024, 1, 1);
}
示例7: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
@BeforeClass
public static void setup() throws Exception {
    // Bring up the mini-cluster with test-friendly timing settings.
    conf = new YarnConfiguration();
    conf.setLong(
        YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
        rolling_interval_sec);
    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
    // set the minimum allocation so that resource decrease can go under 1024
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);

    yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();

    // Client handle for RM interaction.
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Snapshot topology and the shared request parameters.
    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    priority = Priority.newInstance(1);
    priority2 = Priority.newInstance(2);
    capability = Resource.newInstance(1024, 1);
    node = nodeReports.get(0).getNodeId().getHost();
    rack = nodeReports.get(0).getRackName();
    nodes = new String[] { node };
    racks = new String[] { rack };
}
示例8: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
@BeforeClass
public static void setup() throws Exception {
    // Configure and launch the YARN mini-cluster used by all tests.
    conf = new YarnConfiguration();
    conf.setLong(
        YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
        rolling_interval_sec);
    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);

    yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();

    // RM client for node queries and app submission.
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Cache the first node's host/rack plus default priorities and capability.
    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    node = nodeReports.get(0).getNodeId().getHost();
    rack = nodeReports.get(0).getRackName();
    nodes = new String[] { node };
    racks = new String[] { rack };
    priority = Priority.newInstance(1);
    priority2 = Priority.newInstance(2);
    capability = Resource.newInstance(1024, 1);
}
示例9: start
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
public void start() throws IOException {
    // HDFS mini-cluster rooted in a fresh temp directory.
    YarnConfiguration clusterConf = new YarnConfiguration();
    File hdfsBase = Files.createTempDirectory("temp-hdfs-").toFile();
    clusterConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsBase.getAbsolutePath());
    hdfsCluster = new MiniDFSCluster.Builder(clusterConf)
        .nameNodeHttpPort(57000)
        .startupOption(HdfsServerConstants.StartupOption.REGULAR)
        .build();

    // Single-node YARN cluster with a FIFO scheduler and a small allocation.
    clusterConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    clusterConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    miniYARNCluster = new MiniYARNCluster("testMRJOb", 1, 1, 1);
    miniYARNCluster.init(clusterConf);
    miniYARNCluster.start();

    // Expose the live cluster config and an HDFS handle to the tests.
    configuration = miniYARNCluster.getConfig();
    fileSystem = new Path("hdfs://localhost:" + hdfsCluster.getNameNodePort() + "/")
        .getFileSystem(configuration);
}
示例10: start
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
/**
 * Configures and starts the YARN mini-cluster for this test harness.
 *
 * @throws Exception if cluster configuration or startup fails
 */
@Override
public void start() throws Exception {
    LOG.info("YARN: Starting MiniYarnCluster");
    configure();
    miniYARNCluster = new MiniYARNCluster(testName, numResourceManagers, numNodeManagers,
        numLocalDirs, numLogDirs, enableHa);
    // Fix: do NOT call serviceInit() directly. init() drives the service
    // lifecycle and invokes serviceInit() internally; calling both
    // double-initialized the cluster and bypassed the NOTINITED -> INITED
    // state transition of the service state machine.
    miniYARNCluster.init(configuration);
    miniYARNCluster.start();
}
示例11: createClientAndCluster
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
/**
 * Starts the mini-cluster and a YARN client with the supplied configuration,
 * verifying that every node manager registers and is reported RUNNING.
 */
private static void createClientAndCluster(Configuration conf)
    throws Exception {
    yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();

    // Client handle for the RM.
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Fail fast if the NMs never connect or are not all reported running.
    boolean allConnected = yarnCluster.waitForNodeManagersToConnect(5000L);
    assertTrue("All node managers did not connect to the RM within the "
        + "allotted 5-second timeout", allConnected);
    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    assertEquals("Not all node managers were reported running",
        nodeCount, nodeReports.size());

    // Cache topology and default request parameters for the tests.
    node = nodeReports.get(0).getNodeId().getHost();
    rack = nodeReports.get(0).getRackName();
    nodes = new String[] { node };
    racks = new String[] { rack };
    priority = Priority.newInstance(1);
    priority2 = Priority.newInstance(2);
    capability = Resource.newInstance(1024, 1);
}
示例12: before
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
@Override
protected void before() throws Throwable {
    // Run the parent rule's setup first, then bring up a minimal
    // single-RM / single-NM YARN cluster for the test.
    super.before();
    yarnCluster = new MiniYARNCluster(name, 1, 1, 1);
    yarnCluster.init(getConfiguration());
    yarnCluster.start();
}
示例13: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
@BeforeClass
public static void setup() throws Exception {
    // Quick heartbeat and near-immediate log cleanup for fast tests.
    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);

    yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();

    // Client for querying the RM.
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Record the first node's location and the shared request defaults.
    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    node = nodeReports.get(0).getNodeId().getHost();
    rack = nodeReports.get(0).getRackName();
    nodes = new String[] { node };
    racks = new String[] { rack };
    priority = Priority.newInstance(1);
    priority2 = Priority.newInstance(2);
    capability = Resource.newInstance(1024, 1);
}
示例14: createMiniYARNCluster
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
/**
 * Creates and starts a MiniYARNCluster with the requested number of node
 * managers, using a FIFO scheduler with a small minimum allocation.
 *
 * @param yarnConf base configuration, mutated with the cluster settings
 * @param numOfNodeManagers number of NMs to launch
 * @return the started cluster
 * @throws Exception if startup is interrupted
 */
public static MiniYARNCluster createMiniYARNCluster(Configuration yarnConf, int numOfNodeManagers) throws Exception {
    yarnConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    yarnConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    // Fix: set the scheduler address BEFORE init(). Services read their
    // configuration during initialization, so a value set after init() was
    // not guaranteed to take effect.
    yarnConf.set("yarn.resourcemanager.scheduler.address", "0.0.0.0:8030");
    MiniYARNCluster miniYarnCluster = new MiniYARNCluster("yarn", numOfNodeManagers, 1, 1);
    miniYarnCluster.init(yarnConf);
    miniYarnCluster.start();
    // wait to make sure the server is started
    // TODO: replace with a positive readiness check, e.g.
    // miniYarnCluster.waitForNodeManagersToConnect(timeout)
    Thread.sleep(3000);
    return miniYarnCluster;
}
示例15: init
import org.apache.hadoop.yarn.server.MiniYARNCluster; //导入方法依赖的package包/类
/**
 * Boots the full test environment: an in-memory ZooKeeper server, a
 * single-datanode HDFS mini-cluster rooted at {@code folder}, a three-NM
 * YARN mini-cluster, and the Twill runner service plus a version-aware
 * YARN app client on top of them. All services are stored in static
 * fields (presumably torn down elsewhere — not visible in this chunk).
 *
 * @param folder base directory for the mini DFS data
 * @throws IOException if the DFS cluster fails to come up
 */
private static final void init(File folder) throws IOException {
    // Starts Zookeeper
    zkServer = InMemoryZKServer.builder().build();
    zkServer.startAndWait();

    // Start YARN mini cluster
    LOG.info("Starting Mini DFS on path {}", folder);
    Configuration fsConf = new HdfsConfiguration(new Configuration());
    fsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, folder.getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(fsConf).numDataNodes(1).build();

    // Derive the YARN conf from the running DFS conf so NMs can reach HDFS.
    Configuration conf = new YarnConfiguration(dfsCluster.getFileSystem().getConf());
    if (YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_20)) {
        // Hadoop 2.0 line: only the FIFO scheduler is used here.
        conf.set("yarn.resourcemanager.scheduler.class",
                 "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
    } else {
        // Newer Hadoop: capacity scheduler with the dominant-resource
        // calculator so CPU (vcores) is considered alongside memory.
        conf.set("yarn.resourcemanager.scheduler.class",
                 "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
        conf.set("yarn.scheduler.capacity.resource-calculator",
                 "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
        conf.setBoolean("yarn.scheduler.include-port-in-node-name", true);
    }
    // Relax virtual-memory checks and delay container-dir deletion an hour
    // so failed-container logs can be inspected during test runs.
    conf.set("yarn.nodemanager.vmem-pmem-ratio", "20.1");
    conf.set("yarn.nodemanager.vmem-check-enabled", "false");
    conf.set("yarn.scheduler.minimum-allocation-mb", "128");
    conf.set("yarn.nodemanager.delete.debug-delay-sec", "3600");

    cluster = new MiniYARNCluster("test-cluster", 3, 1, 1);
    cluster.init(conf);
    cluster.start();
    // Snapshot the cluster's effective configuration for the tests.
    config = new YarnConfiguration(cluster.getConfig());

    // Twill runner and YARN app client built on top of the live cluster.
    runnerService = createTwillRunnerService();
    runnerService.startAndWait();
    yarnAppClient = new VersionDetectYarnAppClientFactory().create(conf);
    yarnAppClient.start();
}