This article collects typical usage examples of the Java method org.apache.hadoop.yarn.server.MiniYARNCluster.getConfig. If you are wondering what MiniYARNCluster.getConfig does, how to call it, or where to find examples of it in use, the curated code samples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.yarn.server.MiniYARNCluster.
The following presents 7 code examples of the MiniYARNCluster.getConfig method, sorted by popularity by default.
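Before the individual examples, a minimal, self-contained sketch of the pattern most of them share may be useful: start a MiniYARNCluster, read back its effective configuration with getConfig(), and use that configuration to initialize a YarnClient. The class name MiniYarnGetConfigSketch and the "getconfig-demo" test name are illustrative placeholders, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;

public class MiniYarnGetConfigSketch {  // hypothetical class name for illustration
  public static void main(String[] args) {
    // Start an in-process YARN cluster: 1 NodeManager, 1 local dir, 1 log dir.
    MiniYARNCluster cluster = new MiniYARNCluster("getconfig-demo", 1, 1, 1);
    cluster.init(new YarnConfiguration());
    cluster.start();
    try {
      // getConfig() returns the configuration the mini cluster is actually
      // running with, including values filled in at startup such as the
      // ResourceManager connection settings, so a client built from it can
      // reach the cluster.
      Configuration conf = cluster.getConfig();
      YarnClient client = YarnClient.createYarnClient();
      client.init(conf);
      client.start();
      // ... exercise the cluster through the client here ...
      client.stop();
    } finally {
      cluster.stop();
    }
  }
}

This is why the examples below pass cluster.getConfig() to YarnClient.init or copy it into a new YarnConfiguration instead of reusing only the configuration object they originally built.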
Example 1: start
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the package/class this method depends on

public void start() throws IOException {
  YarnConfiguration clusterConf = new YarnConfiguration();
  // Run the mini HDFS cluster out of a temporary base directory
  final File hdfsBase = Files.createTempDirectory("temp-hdfs-").toFile();
  clusterConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsBase.getAbsolutePath());
  hdfsCluster = new MiniDFSCluster.Builder(clusterConf)
      .nameNodeHttpPort(57000)
      .startupOption(HdfsServerConstants.StartupOption.REGULAR)
      .build();
  clusterConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
  clusterConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
  // MiniYARNCluster(testName, numNodeManagers, numLocalDirs, numLogDirs)
  miniYARNCluster = new MiniYARNCluster("testMRJOb", 1, 1, 1);
  miniYARNCluster.init(clusterConf);
  miniYARNCluster.start();
  // Pick up the configuration the mini cluster is actually running with
  configuration = miniYARNCluster.getConfig();
  fileSystem = new Path("hdfs://localhost:" + hdfsCluster.getNameNodePort() + "/").getFileSystem(configuration);
}
Example 2: setupYarnClient
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the package/class this method depends on

private YarnClient setupYarnClient(MiniYARNCluster cluster) {
  // Use the mini cluster's own configuration so the client can locate the ResourceManager
  final Configuration yarnConf = cluster.getConfig();
  YarnClient client = YarnClient.createYarnClient();
  client.init(yarnConf);
  client.start();
  return client;
}
Example 3: init
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the package/class this method depends on

private static final void init(File folder) throws IOException {
  // Starts Zookeeper
  zkServer = InMemoryZKServer.builder().build();
  zkServer.startAndWait();

  // Start YARN mini cluster
  LOG.info("Starting Mini DFS on path {}", folder);
  Configuration fsConf = new HdfsConfiguration(new Configuration());
  fsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, folder.getAbsolutePath());
  dfsCluster = new MiniDFSCluster.Builder(fsConf).numDataNodes(1).build();

  Configuration conf = new YarnConfiguration(dfsCluster.getFileSystem().getConf());

  if (YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_20)) {
    conf.set("yarn.resourcemanager.scheduler.class",
             "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
  } else {
    conf.set("yarn.resourcemanager.scheduler.class",
             "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
    conf.set("yarn.scheduler.capacity.resource-calculator",
             "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
    conf.setBoolean("yarn.scheduler.include-port-in-node-name", true);
  }
  conf.set("yarn.nodemanager.vmem-pmem-ratio", "20.1");
  conf.set("yarn.nodemanager.vmem-check-enabled", "false");
  conf.set("yarn.scheduler.minimum-allocation-mb", "128");
  conf.set("yarn.nodemanager.delete.debug-delay-sec", "3600");

  cluster = new MiniYARNCluster("test-cluster", 3, 1, 1);
  cluster.init(conf);
  cluster.start();
  config = new YarnConfiguration(cluster.getConfig());

  runnerService = createTwillRunnerService();
  runnerService.startAndWait();

  yarnAppClient = new VersionDetectYarnAppClientFactory().create(conf);
  yarnAppClient.start();
}
Example 4: testSubmitIncorrectQueue
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the package/class this method depends on

@Test (timeout = 30000)
public void testSubmitIncorrectQueue() throws IOException {
  MiniYARNCluster cluster = new MiniYARNCluster("testMRAMTokens", 1, 1, 1);
  YarnClient rmClient = null;
  try {
    cluster.init(new YarnConfiguration());
    cluster.start();
    final Configuration yarnConf = cluster.getConfig();
    rmClient = YarnClient.createYarnClient();
    rmClient.init(yarnConf);
    rmClient.start();
    YarnClientApplication newApp = rmClient.createApplication();
    ApplicationId appId = newApp.getNewApplicationResponse().getApplicationId();

    // Create launch context for app master
    ApplicationSubmissionContext appContext =
        Records.newRecord(ApplicationSubmissionContext.class);
    // set the application id
    appContext.setApplicationId(appId);
    // set the application name
    appContext.setApplicationName("test");
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue("nonexist");
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer =
        Records.newRecord(ContainerLaunchContext.class);
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(Resource.newInstance(1024, 1));
    // appContext.setUnmanagedAM(unmanaged);

    // Submit the application to the applications manager
    rmClient.submitApplication(appContext);
    Assert.fail("Job submission should have thrown an exception");
  } catch (YarnException e) {
    Assert.assertTrue(e.getMessage().contains("Failed to submit"));
  } finally {
    if (rmClient != null) {
      rmClient.stop();
    }
    cluster.stop();
  }
}
Example 5: testGetQueueInfoPreemptionEnabled
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the package/class this method depends on

@Test
public void testGetQueueInfoPreemptionEnabled() throws Exception {
  CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
  ReservationSystemTestUtil.setupQueueConfiguration(conf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
      "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
          + "ProportionalCapacityPreemptionPolicy");
  conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  MiniYARNCluster cluster =
      new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
  YarnClient yarnClient = null;
  try {
    cluster.init(conf);
    cluster.start();
    final Configuration yarnConf = cluster.getConfig();
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConf);
    yarnClient.start();

    QueueCLI cli = new QueueCLI();
    cli.setClient(yarnClient);
    cli.setSysOutPrintStream(sysOut);
    cli.setSysErrPrintStream(sysErr);
    sysOutStream.reset();

    int result = cli.run(new String[] { "-status", "a1" });
    assertEquals(0, result);
    Assert.assertTrue(sysOutStream.toString()
        .contains("Preemption : enabled"));
  } finally {
    // clean-up
    if (yarnClient != null) {
      yarnClient.stop();
    }
    cluster.stop();
    cluster.close();
  }
}
Example 6: testGetQueueInfoPreemptionDisabled
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the package/class this method depends on

@Test
public void testGetQueueInfoPreemptionDisabled() throws Exception {
  CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
  ReservationSystemTestUtil.setupQueueConfiguration(conf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
      "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
          + "ProportionalCapacityPreemptionPolicy");
  conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  conf.setBoolean(
      "yarn.scheduler.capacity.root.a.a1.disable_preemption", true);
  MiniYARNCluster cluster =
      new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
  YarnClient yarnClient = null;
  try {
    cluster.init(conf);
    cluster.start();
    final Configuration yarnConf = cluster.getConfig();
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConf);
    yarnClient.start();

    QueueCLI cli = new QueueCLI();
    cli.setClient(yarnClient);
    cli.setSysOutPrintStream(sysOut);
    cli.setSysErrPrintStream(sysErr);
    sysOutStream.reset();

    int result = cli.run(new String[] { "-status", "a1" });
    assertEquals(0, result);
    Assert.assertTrue(sysOutStream.toString()
        .contains("Preemption : disabled"));
  } finally {
    // clean-up
    if (yarnClient != null) {
      yarnClient.stop();
    }
    cluster.stop();
    cluster.close();
  }
}
Example 7: before
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the package/class this method depends on

@Override
protected void before() throws Throwable {
  tmpFolder.create();

  // Starts Zookeeper
  zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
  zkServer.startAndWait();

  // Start YARN mini cluster
  File miniDFSDir = tmpFolder.newFolder();
  LOG.info("Starting Mini DFS on path {}", miniDFSDir);
  Configuration fsConf = new HdfsConfiguration(new Configuration());
  fsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, miniDFSDir.getAbsolutePath());
  for (Map.Entry<String, String> entry : extraConfig.entrySet()) {
    fsConf.set(entry.getKey(), entry.getValue());
  }
  dfsCluster = new MiniDFSCluster.Builder(fsConf).numDataNodes(1).build();

  Configuration conf = new YarnConfiguration(dfsCluster.getFileSystem().getConf());

  if (YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_20)) {
    conf.set("yarn.resourcemanager.scheduler.class",
             "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
  } else {
    conf.set("yarn.resourcemanager.scheduler.class",
             "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
    conf.set("yarn.scheduler.capacity.resource-calculator",
             "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
    conf.setBoolean("yarn.scheduler.include-port-in-node-name", true);
  }
  conf.set("yarn.nodemanager.vmem-pmem-ratio", "100.1");
  conf.set("yarn.nodemanager.vmem-check-enabled", "false");
  conf.set("yarn.scheduler.minimum-allocation-mb", "128");
  conf.set("yarn.nodemanager.delete.debug-delay-sec", "3600");
  conf.set(Configs.Keys.LOCAL_STAGING_DIRECTORY, tmpFolder.newFolder().getAbsolutePath());

  cluster = new MiniYARNCluster("test-cluster", 3, 1, 1);
  cluster.init(conf);
  cluster.start();
  config = new YarnConfiguration(cluster.getConfig());

  twillRunner = createTwillRunnerService();
  twillRunner.start();

  yarnAppClient = new VersionDetectYarnAppClientFactory().create(conf);
}