This article collects typical usage examples of the Java class org.apache.hadoop.yarn.server.MiniYARNCluster. If you are wondering what the MiniYARNCluster class is for, how to use it, or what real code that uses it looks like, the curated examples below should help.
The MiniYARNCluster class belongs to the org.apache.hadoop.yarn.server package. Fifteen code examples of the class are shown below, sorted by popularity by default.
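Most of the examples share the same lifecycle: construct the cluster with a test name and NodeManager/local-dir/log-dir counts, call init() with a YarnConfiguration, then start(), and stop() when the test is done. Below is a minimal sketch of that pattern; the class name, test name, and counts are illustrative and not taken from any of the examples.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;

public class MiniYarnClusterLifecycle {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // one NodeManager, one local dir, one log dir; the test name is arbitrary
    MiniYARNCluster cluster = new MiniYARNCluster("lifecycle-demo", 1, 1, 1);
    try {
      cluster.init(conf);  // wires up the RM and NM services
      cluster.start();     // brings the in-process cluster up
      // run the code under test against cluster.getConfig() here
    } finally {
      cluster.stop();      // releases ports and temporary directories
    }
  }
}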
Example 1: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@Before
public void setup() throws IOException {
  fakeAppId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
  setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE);
  setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE);
  conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
  cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 1, 1, 1);
}
Example 2: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@Before
public void setup() throws IOException {
  fakeAppId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
  HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
  HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
  conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
  cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 1, 1, 1);
}
Example 3: setupClass
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
/**
 * Sets up this class for use in unit testing. It spins up the YARN mini-cluster and also sets up
 * various default classes.
 *
 * @throws IOException if there's an error accessing the local filesystem
 * @throws SQLException if there's an error querying the embedded DB
 */
@BeforeClass
public static void setupClass() throws IOException, SQLException {
  conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
  miniCluster = new MiniYARNCluster("test", 1, 1, 1);
  miniCluster.init(conf);
  miniCluster.start();
  conflictHandler = new ObjectConflictHandler();
  conflictHandler.setConf(conf);
  destinationObjectFactory = new DestinationObjectFactory();
  destinationObjectFactory.setConf(conf);
}
Example 4: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@BeforeClass
public static void setup() throws Exception {
  // start minicluster
  conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
  yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
  yarnCluster.init(conf);
  yarnCluster.start();
  // start rm client
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  // get node info
  nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
  priority = Priority.newInstance(1);
  priority2 = Priority.newInstance(2);
  capability = Resource.newInstance(1024, 1);
  node = nodeReports.get(0).getNodeId().getHost();
  rack = nodeReports.get(0).getRackName();
  nodes = new String[]{ node };
  racks = new String[]{ rack };
}
Example 5: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@Before
public void setup() throws IOException {
  fakeAppId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
  HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
  HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
  conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
  conf.set(YarnConfiguration.LEADER_CLIENT_FAILOVER_PROXY_PROVIDER,
      "org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider");
  cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 1, 1, 1);
}
Example 6: setupMiniYARNCluster
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
private MiniYARNCluster setupMiniYARNCluster() throws Exception {
  CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
  ReservationSystemTestUtil.setupQueueConfiguration(conf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
  final MiniYARNCluster cluster =
      new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
  cluster.init(conf);
  cluster.start();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return cluster.getResourceManager().getRMContext()
          .getReservationSystem()
          .getPlan(ReservationSystemTestUtil.reservationQ)
          .getTotalCapacity().getMemorySize() > 6000;
    }
  }, 10, 10000);
  return cluster;
}
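A test calling this helper would typically guard the returned cluster with try/finally so it is stopped even when the test body throws. A minimal sketch; the test name and body are placeholders:
@Test
public void testReservationAPIs() throws Exception {
  MiniYARNCluster cluster = setupMiniYARNCluster();
  try {
    // exercise the reservation system via cluster.getResourceManager() here
  } finally {
    cluster.stop();
  }
}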
Example 7: start
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@Override
public synchronized void start() {
  try {
    if (!getConfig().getBoolean(
        JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS,
        JHAdminConfig.DEFAULT_MR_HISTORY_MINICLUSTER_FIXED_PORTS)) {
      // pick free random ports.
      getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
          MiniYARNCluster.getHostname() + ":0");
      getConfig().set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
          MiniYARNCluster.getHostname() + ":0");
    }
    super.start();
  } catch (Throwable t) {
    throw new YarnRuntimeException(t);
  }
  LOG.info("MiniMRYARN ResourceManager address: " +
      getConfig().get(YarnConfiguration.RM_ADDRESS));
  LOG.info("MiniMRYARN ResourceManager web address: " +
      getConfig().get(YarnConfiguration.RM_WEBAPP_ADDRESS));
  LOG.info("MiniMRYARN HistoryServer address: " +
      getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
  LOG.info("MiniMRYARN HistoryServer web address: " +
      getConfig().get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS));
}
Example 8: start
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@Override
public synchronized void start() {
  try {
    if (!getConfig().getBoolean(
        JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS,
        JHAdminConfig.DEFAULT_MR_HISTORY_MINICLUSTER_FIXED_PORTS)) {
      // pick free random ports.
      getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
          MiniYARNCluster.getHostname() + ":0");
      getConfig().set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
          MiniYARNCluster.getHostname() + ":0");
    }
    super.start();
  } catch (Throwable t) {
    throw new YarnException(t);
  }
  LOG.info("MiniMRYARN ResourceManager address: " +
      getConfig().get(YarnConfiguration.RM_ADDRESS));
  LOG.info("MiniMRYARN ResourceManager web address: " +
      getConfig().get(YarnConfiguration.RM_WEBAPP_ADDRESS));
  LOG.info("MiniMRYARN HistoryServer address: " +
      getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
  LOG.info("MiniMRYARN HistoryServer web address: " +
      getConfig().get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS));
}
Example 9: testRMWebAppRedirect
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@Test
public void testRMWebAppRedirect() throws YarnException,
    InterruptedException, IOException {
  cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 0, 1, 1);
  conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
  cluster.init(conf);
  cluster.start();
  getAdminService(0).transitionToActive(req);
  String rm1Url = "http://0.0.0.0:18088";
  String rm2Url = "http://0.0.0.0:28088";
  String header = getHeader("Refresh", rm2Url);
  assertTrue(header.contains("; url=" + rm1Url));
  header = getHeader("Refresh", rm2Url + "/cluster/cluster");
  assertEquals(null, header);
  header = getHeader("Refresh", rm2Url + "/ws/v1/cluster/info");
  assertEquals(null, header);
  header = getHeader("Refresh", rm2Url + "/ws/v1/cluster/apps");
  assertTrue(header.contains("; url=" + rm1Url));
  // Due to a MiniYARNCluster limitation (the dispatcher is a singleton),
  // we couldn't add a test case after explicitFailover();
}
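The getHeader helper is not shown in this example; one plausible implementation (an assumption, not the original test code) opens the URL without following redirects and returns the requested response header:
// assumes imports of java.io.IOException, java.net.HttpURLConnection and java.net.URL
private static String getHeader(String field, String url) throws IOException {
  HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
  conn.setInstanceFollowRedirects(false); // keep the standby RM's Refresh header intact
  try {
    return conn.getHeaderField(field);
  } finally {
    conn.disconnect();
  }
}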
Example 10: start
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
public void start() throws IOException {
  log.info("Checking if cluster=" + clusterName + " needs to be started");
  synchronized (this.startupShutdownMonitor) {
    if (started) {
      return;
    }
    log.info("Starting cluster=" + clusterName);
    configuration = new YarnConfiguration();
    //configuration.setBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, true);
    configuration.setBoolean("yarn.is.minicluster", true);
    configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/" + clusterName + "-dfs");
    dfsCluster = new MiniDFSCluster.Builder(configuration).
        numDataNodes(nodes).
        build();
    yarnCluster = new MiniYARNCluster(clusterName, nodes, 1, 1);
    yarnCluster.init(configuration);
    yarnCluster.start();
    log.info("Started cluster=" + clusterName);
    started = true;
  }
}
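A wrapper like this normally exposes a matching stop() that tears the clusters down in reverse order. A minimal sketch, assuming the same fields and the same startupShutdownMonitor lock:
public void stop() {
  synchronized (this.startupShutdownMonitor) {
    if (!started) {
      return;
    }
    log.info("Stopping cluster=" + clusterName);
    if (yarnCluster != null) {
      yarnCluster.stop();    // stop YARN first, it runs on top of HDFS
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown(); // MiniDFSCluster uses shutdown(), not stop()
    }
    started = false;
  }
}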
Example 11: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@BeforeClass
public static void setup() throws Exception {
  // start minicluster
  conf = new YarnConfiguration();
  conf.setLong(
      YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
      rolling_interval_sec);
  conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
  yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
  yarnCluster.init(conf);
  yarnCluster.start();
  // start rm client
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  // get node info
  nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
  priority = Priority.newInstance(1);
  priority2 = Priority.newInstance(2);
  capability = Resource.newInstance(1024, 1, 1);
  node = nodeReports.get(0).getNodeId().getHost();
  rack = nodeReports.get(0).getRackName();
  nodes = new String[]{ node };
  racks = new String[]{ rack };
}
Example 12: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@BeforeClass
public static void setup() throws Exception {
  // start minicluster
  conf = new YarnConfiguration();
  conf.setLong(
      YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
      rolling_interval_sec);
  conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
  // set the minimum allocation so that resource decrease can go under 1024
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
  yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
  yarnCluster.init(conf);
  yarnCluster.start();
  // start rm client
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  // get node info
  nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
  priority = Priority.newInstance(1);
  priority2 = Priority.newInstance(2);
  capability = Resource.newInstance(1024, 1);
  node = nodeReports.get(0).getNodeId().getHost();
  rack = nodeReports.get(0).getRackName();
  nodes = new String[]{ node };
  racks = new String[]{ rack };
}
Example 13: serviceStart
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@Override
public synchronized void serviceStart() throws Exception {
  try {
    if (!getConfig().getBoolean(
        JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS,
        JHAdminConfig.DEFAULT_MR_HISTORY_MINICLUSTER_FIXED_PORTS)) {
      String hostname = MiniYARNCluster.getHostname();
      // pick free random ports.
      getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
          hostname + ":0");
      MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(), hostname
          + ":0");
      getConfig().set(JHAdminConfig.JHS_ADMIN_ADDRESS,
          hostname + ":0");
    }
    historyServer = new JobHistoryServer();
    historyServer.init(getConfig());
    new Thread() {
      public void run() {
        historyServer.start();
        jhsStarted = true;
      };
    }.start();
    while (!jhsStarted) {
      LOG.info("Waiting for HistoryServer to start...");
      Thread.sleep(1500);
    }
    //TODO Add a timeout. State.STOPPED check ?
    if (historyServer.getServiceState() != STATE.STARTED) {
      throw new IOException("HistoryServer failed to start");
    }
    super.serviceStart();
  } catch (Throwable t) {
    throw new YarnRuntimeException(t);
  }
}
Example 14: setup
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
@BeforeClass
public static void setup() throws Exception {
  // start minicluster
  conf = new YarnConfiguration();
  conf.setLong(
      YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
      rolling_interval_sec);
  conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
  yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
  yarnCluster.init(conf);
  yarnCluster.start();
  // start rm client
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  // get node info
  nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
  priority = Priority.newInstance(1);
  priority2 = Priority.newInstance(2);
  capability = Resource.newInstance(1024, 1);
  node = nodeReports.get(0).getNodeId().getHost();
  rack = nodeReports.get(0).getRackName();
  nodes = new String[]{ node };
  racks = new String[]{ rack };
}
Example 15: start
import org.apache.hadoop.yarn.server.MiniYARNCluster; // import the required package/class
public void start() throws IOException {
  YarnConfiguration clusterConf = new YarnConfiguration();
  final File hdfsBase = Files.createTempDirectory("temp-hdfs-").toFile();
  clusterConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsBase.getAbsolutePath());
  hdfsCluster = new MiniDFSCluster.Builder(clusterConf).nameNodeHttpPort(57000)
      .startupOption(HdfsServerConstants.StartupOption.REGULAR).build();
  clusterConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
  clusterConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
  miniYARNCluster = new MiniYARNCluster("testMRJOb", 1, 1, 1);
  miniYARNCluster.init(clusterConf);
  miniYARNCluster.start();
  configuration = miniYARNCluster.getConfig();
  fileSystem = new Path("hdfs://localhost:" + hdfsCluster.getNameNodePort() + "/").getFileSystem(configuration);
}