本文整理匯總了Java中org.apache.hadoop.yarn.client.api.YarnClient.createYarnClient方法的典型用法代碼示例。如果您正苦於以下問題:Java YarnClient.createYarnClient方法的具體用法?Java YarnClient.createYarnClient怎麽用?Java YarnClient.createYarnClient使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.yarn.client.api.YarnClient
的用法示例。
在下文中一共展示了YarnClient.createYarnClient方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: killApplication
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Kills the given YARN application, treating "already gone" conditions as
 * success so the call is idempotent.
 *
 * Fixes vs. original: the client is now always stopped via finally (it was
 * leaked whenever an exception escaped), and getMessage() is null-guarded
 * (a message-less exception previously triggered an NPE inside the catch).
 *
 * @param applicationId textual application id, e.g. "application_123_0001".
 * @throws Exception any RM failure other than not-found / invalid-id.
 */
public static void killApplication(String applicationId) throws Exception {
  YarnClient yarnClient = YarnClient.createYarnClient();
  try {
    // 'conf' is a field of the enclosing class — presumably the cluster
    // YarnConfiguration; TODO confirm against the declaring class.
    yarnClient.init(conf);
    yarnClient.start();
    LOGGER.logInfo("[yarn application -kill %s]", applicationId);
    yarnClient.killApplication(ConverterUtils.toApplicationId(applicationId));
  } catch (ApplicationNotFoundException ignored) {
    // Application already finished or was never submitted — nothing to kill.
  } catch (Exception e) {
    // The RM reports a malformed/unknown id via a generic exception whose
    // message contains "invalid applicationid"; treat it like not-found.
    // getMessage() may be null, so guard before lowercasing.
    String message = e.getMessage();
    if (message == null || !message.toLowerCase().contains("invalid applicationid")) {
      throw e;
    }
  } finally {
    // Release client resources even when killApplication throws.
    yarnClient.stop();
  }
}
示例2: verifyClientConnection
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Asserts that a YARN client can reach the Active ResourceManager.
 * Makes up to three attempts, each with a freshly built and started client;
 * the first successful getApplications() call proves connectivity.
 * Fails the test if every attempt throws.
 */
private void verifyClientConnection() {
  for (int attempt = 0; attempt < 3; attempt++) {
    Configuration yarnConf = new YarnConfiguration(this.conf);
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConf);
    yarnClient.start();
    try {
      // Any successful RPC to the RM is good enough — return immediately.
      yarnClient.getApplications();
      return;
    } catch (Exception e) {
      LOG.error(e);
    } finally {
      // Always tear the client down before the next attempt.
      yarnClient.stop();
    }
  }
  fail("Client couldn't connect to the Active RM");
}
示例3: testParseTimelineDelegationTokenRenewer
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
@Test
public void testParseTimelineDelegationTokenRenewer() throws Exception {
  // Client side: with the timeline service enabled, the client should
  // derive its timeline delegation-token renewer from the RM principal.
  final YarnClientImpl yarnClientImpl = (YarnClientImpl) YarnClient.createYarnClient();
  final Configuration yarnConf = new YarnConfiguration();
  yarnConf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  yarnConf.set(YarnConfiguration.RM_PRINCIPAL, "rm/[email protected]");
  yarnConf.set(YarnConfiguration.RM_ADDRESS, "localhost:8188");
  try {
    yarnClientImpl.init(yarnConf);
    yarnClientImpl.start();
    // The renewer is computed during start(); it must equal the resolved
    // RM principal.
    Assert.assertEquals("rm/[email protected]", yarnClientImpl.timelineDTRenewer);
  } finally {
    yarnClientImpl.stop();
  }
}
示例4: killJobOnCluster
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Kills a YARN application identified by its textual id.
 *
 * Fix vs. original: the client is now closed in a finally block — previously
 * a failing killApplication leaked the RM connection.
 *
 * @param applicationId id of the form "application_&lt;clusterTimestamp&gt;_&lt;sequence&gt;".
 * @param log logger that records the successful kill.
 * @throws YarnException if the RM rejects the kill request.
 * @throws IOException on RPC/transport failure.
 */
public static void killJobOnCluster(String applicationId, Logger log) throws YarnException,
    IOException {
  YarnConfiguration yarnConf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(yarnConf);
  yarnClient.start();
  try {
    // Rebuild the ApplicationId from "application_<timestamp>_<sequence>";
    // split with limit -1 keeps trailing empty tokens rather than dropping them.
    String[] split = applicationId.split("_", -1);
    ApplicationId aid = ApplicationId.newInstance(Long.parseLong(split[1]),
        Integer.parseInt(split[2]));
    yarnClient.killApplication(aid);
    log.info("successfully killed application: " + aid);
  } finally {
    // Release the RM connection even when the kill fails.
    yarnClient.close();
  }
}
示例5: ClusterInfo
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Creates a cluster handle: builds a YARN client from the given cluster
 * configuration and starts it immediately, so the handle is usable as soon
 * as the constructor returns.
 *
 * @param name logical name of this cluster.
 * @param conf cluster configuration wrapper; conf.conf() supplies the Hadoop
 *             Configuration used to initialize the client.
 */
ClusterInfo(String name, YarnClusterConfiguration conf) {
this.name = name;
this.client = YarnClient.createYarnClient();
// Client lifecycle is tied to this object — started here, presumably
// stopped elsewhere by the owner; TODO confirm where it is closed.
client.init(conf.conf());
client.start();
this.conf = conf;
}
示例6: testCreateAthenaXCluster
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * End-to-end smoke test: boots a mini AthenaX/YARN cluster, deploys a trivial
 * Flink job through JobDeployer, and waits for it to reach FINISHED.
 *
 * Fix vs. original: the ScheduledExecutorService is now shut down in a
 * finally block — previously its worker thread leaked across tests.
 */
@Test
public void testCreateAthenaXCluster() throws Exception {
  ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
  try {
    Configuration flinkConf = new Configuration();
    flinkConf.setString(JobManagerOptions.ADDRESS, "localhost");
    // Mini cluster and YARN client are both AutoCloseable — rely on
    // try-with-resources for their teardown.
    try (MiniAthenaXCluster cluster = new MiniAthenaXCluster(JobDeployerITest.class.getSimpleName())) {
      cluster.start();
      YarnConfiguration conf = cluster.getYarnConfiguration();
      YarnClusterConfiguration clusterConf = cluster.getYarnClusterConf();
      final ApplicationId appId;
      try (YarnClient client = YarnClient.createYarnClient()) {
        client.init(conf);
        client.start();
        JobDeployer deployer = new JobDeployer(clusterConf, client, executor, flinkConf);
        appId = deployer.createApplication();
        InstanceMetadata md = new InstanceMetadata(UUID.randomUUID(), UUID.randomUUID());
        JobConf jobConf = new JobConf(appId, "test", Collections.emptyList(), null, 1, 2048, md);
        deployer.start(JobITestUtil.trivialJobGraph(), jobConf);
        // Block until the application leaves RUNNING, then assert success.
        YarnApplicationState state = MiniAthenaXCluster.pollFinishedApplicationState(client, appId);
        assertEquals(FINISHED, state);
      }
    }
  } finally {
    // Don't leak the scheduler thread between tests.
    executor.shutdownNow();
  }
}
示例7: getLiveContainerIdsFromRM
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Returns the ids of all non-COMPLETE containers of the given application
 * attempt, excluding the AM container itself.
 *
 * Fix vs. original: yarnClient.stop() now runs in a finally block — it was
 * skipped whenever getContainers threw, leaking the client.
 *
 * @param attemptId     application attempt id (string form).
 * @param amContainerId the AM's container id; must appear in the RM report,
 *                      used as a sanity check that the report is reliable.
 * @return live (non-COMPLETE) container ids, AM container removed.
 * @throws Exception if the report is missing or does not contain the AM.
 */
public static HashSet<String> getLiveContainerIdsFromRM(String attemptId, String amContainerId) throws Exception {
  HashSet<String> containerIds = new HashSet<>();
  YarnClient yarnClient = YarnClient.createYarnClient();
  List<ContainerReport> containerReports;
  try {
    // 'conf' is a field of the enclosing class — presumably the cluster
    // YarnConfiguration; TODO confirm against the declaring class.
    yarnClient.init(conf);
    yarnClient.start();
    containerReports = yarnClient.getContainers(ConverterUtils.toApplicationAttemptId(attemptId));
  } finally {
    yarnClient.stop();
  }
  // Since we at least have the AM container, a null report means the RM's
  // answer is unreliable rather than "no containers".
  if (containerReports == null) {
    throw new Exception(
        String.format("Container reports of attempt %s is empty , but AM container exists",
            attemptId));
  }
  for (ContainerReport containerReport : containerReports) {
    // COMPLETE containers are dead — only collect live ones.
    if (containerReport.getContainerState() == ContainerState.COMPLETE) {
      continue;
    }
    containerIds.add(containerReport.getContainerId().toString());
  }
  // Sanity check: the AM container is known to be alive, so its absence
  // again means the report cannot be trusted.
  if (!containerIds.contains(amContainerId)) {
    throw new Exception(
        String.format("Container reports of attempt %s does not contain AM container %s",
            attemptId, amContainerId));
  }
  containerIds.remove(amContainerId);
  return containerIds;
}
示例8: initialize
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Bootstraps the launcher service: loads and validates configuration, starts
 * the YARN client, opens the ZK/HDFS stores, wires the resync/diagnostics
 * handlers, and brings up the web server. Initialization order matters —
 * later components depend on earlier ones (e.g. handlers need yarnClient,
 * webServer needs zkStore), so do not reorder these steps.
 */
@Override
protected void initialize() throws Exception {
super.initialize();
// Queue that serializes framework state transitions; failures route to
// this service's common exception handler.
transitionFrameworkStateQueue = new SystemTaskQueue(this::handleException);
// Initialize LauncherConfiguration
conf = YamlUtils.toObject(GlobalConstants.LAUNCHER_CONFIG_FILE, LauncherConfiguration.class);
ModelValidation.validate(conf);
// Initialize SubServices
// NOTE(review): yarnConf is a field initialized elsewhere — confirm it is
// set before initialize() runs.
yarnClient = YarnClient.createYarnClient();
yarnClient.init(yarnConf);
yarnClient.start();
// Initialize Launcher Store
zkStore = new ZookeeperStore(conf.getZkConnectString(), conf.getZkRootDir());
hdfsStore = new HdfsStore(conf.getHdfsRootDir());
// Initialize other components
rmResyncHandler = new RMResyncHandler(this, conf, yarnClient);
diagnosticsRetrieveHandler = new DiagnosticsRetrieveHandler(this, conf, yarnClient);
// Initialize External Service
webServer = new WebServer(conf, zkStore);
// Log Initialized Configuration
LOGGER.logSplittedLines(Level.INFO,
"Initialized %s with Configuration:\n%s",
serviceName, WebCommon.toJson(conf));
}
示例9: ResourceMgrDelegate
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Delegate responsible for communicating with the Resource Manager's
 * {@link ApplicationClientProtocol}.
 * Creates a YARN client and immediately initializes and starts this service
 * (the inherited init/start also bring up the wrapped client), so the
 * delegate is ready for RM calls as soon as construction completes.
 * @param conf the configuration object.
 */
public ResourceMgrDelegate(YarnConfiguration conf) {
super(ResourceMgrDelegate.class.getName());
this.conf = conf;
this.client = YarnClient.createYarnClient();
// init/start are inherited lifecycle methods; client must be created
// before they run so it participates in the service lifecycle.
init(conf);
start();
}
示例10: createAndStartYarnClient
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Builds a YARN client over a YarnConfiguration view of the given
 * configuration and returns it already started.
 *
 * @param conf base Hadoop configuration to wrap.
 * @return a started YarnClient; the caller owns its lifecycle.
 */
protected YarnClient createAndStartYarnClient(Configuration conf) {
  final YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(new YarnConfiguration(conf));
  yarnClient.start();
  return yarnClient;
}
示例11: testClientStop
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Smoke test: a YarnClient connected to a freshly started ResourceManager
 * can be stopped cleanly, after which the RM itself shuts down.
 */
@Test
public void testClientStop() {
  Configuration configuration = new Configuration();
  ResourceManager resourceManager = new ResourceManager();
  resourceManager.init(configuration);
  resourceManager.start();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(configuration);
  yarnClient.start();
  // The point of the test: stop() must not hang or throw.
  yarnClient.stop();
  resourceManager.stop();
}
示例12: start
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Lazily creates and starts the YARN client. Idempotent: once the client
 * exists, subsequent calls are no-ops.
 */
public void start() {
  if (client == null) {
    // Apply our configuration to UGI before the client touches the cluster.
    UserGroupInformation.setConfiguration(cfg);
    client = YarnClient.createYarnClient();
    client.init(cfg);
    client.start();
  }
}
示例13: testEnvironmentLibShipping
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Tests to ship a lib folder through the {@code ConfigConstants.ENV_FLINK_LIB_DIR}.
 * Verifies that only the folder itself is registered for shipping — not the
 * files inside it — and that the descriptor's own shipFiles list stays empty.
 */
@Test
public void testEnvironmentLibShipping() throws Exception {
AbstractYarnClusterDescriptor descriptor = new YarnClusterDescriptor(
new Configuration(),
temporaryFolder.getRoot().getAbsolutePath(),
YarnClient.createYarnClient());
try {
// A lib folder containing one jar, to see what gets picked up.
File libFolder = temporaryFolder.newFolder().getAbsoluteFile();
File libFile = new File(libFolder, "libFile.jar");
libFile.createNewFile();
Set<File> effectiveShipFiles = new HashSet<>();
// Save the process environment so it can be restored afterwards — the
// env mutation below is process-global and would leak into other tests.
final Map<String, String> oldEnv = System.getenv();
try {
Map<String, String> env = new HashMap<>(1);
env.put(ConfigConstants.ENV_FLINK_LIB_DIR, libFolder.getAbsolutePath());
CommonTestUtils.setEnv(env);
// only execute part of the deployment to test for shipped files
descriptor.addLibFolderToShipFiles(effectiveShipFiles);
} finally {
CommonTestUtils.setEnv(oldEnv);
}
// only add the ship the folder, not the contents
Assert.assertFalse(effectiveShipFiles.contains(libFile));
Assert.assertTrue(effectiveShipFiles.contains(libFolder));
// The descriptor's explicit ship list must be untouched by the env var.
Assert.assertFalse(descriptor.shipFiles.contains(libFile));
Assert.assertFalse(descriptor.shipFiles.contains(libFolder));
} finally {
descriptor.close();
}
}
示例14: testConfigOverwrite
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Verifies that an invalid configuration value (an absurdly large vcores
 * setting) makes session-cluster deployment fail with a
 * ClusterDeploymentException caused by an IllegalConfigurationException.
 */
@Test
public void testConfigOverwrite() throws ClusterDeploymentException {
Configuration configuration = new Configuration();
// overwrite vcores in config
configuration.setInteger(YarnConfigOptions.VCORES, Integer.MAX_VALUE);
final YarnClient yarnClient = YarnClient.createYarnClient();
YarnClusterDescriptor clusterDescriptor = new YarnClusterDescriptor(
configuration,
temporaryFolder.getRoot().getAbsolutePath(),
yarnClient);
// NOTE(review): flinkJar is a test-class field — presumably set up in a
// @BeforeClass; confirm before reusing this snippet.
clusterDescriptor.setLocalJarPath(new Path(flinkJar.getPath()));
// configure slots
ClusterSpecification clusterSpecification = new ClusterSpecification.ClusterSpecificationBuilder()
.setMasterMemoryMB(-1)
.setTaskManagerMemoryMB(-1)
.setNumberTaskManagers(1)
.setSlotsPerTaskManager(1)
.createClusterSpecification();
try {
clusterDescriptor.deploySessionCluster(clusterSpecification);
fail("The deploy call should have failed.");
} catch (ClusterDeploymentException e) {
// we expect the cause to be an IllegalConfigurationException
if (!(e.getCause() instanceof IllegalConfigurationException)) {
throw e;
}
} finally {
clusterDescriptor.close();
}
}
示例15: setup
import org.apache.hadoop.yarn.client.api.YarnClient; // package/class this example's method depends on
/**
 * One-time test fixture: boots a MiniYARNCluster with tuned timing and
 * scheduling parameters, starts a shared YarnClient, and caches node,
 * priority, and capability data used by the individual tests. All targets
 * are static fields of the test class, so ordering here matters.
 */
@BeforeClass
public static void setup() throws Exception {
// start minicluster
conf = new YarnConfiguration();
// Shorten the AMRM token rolling interval so token-roll behavior is
// exercised within the test's lifetime.
conf.setLong(
YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
rolling_interval_sec);
conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
// Fast NM heartbeats keep the tests responsive.
conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
// set the minimum allocation so that resource decrease can go under 1024
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
yarnCluster.init(conf);
yarnCluster.start();
// start rm client
yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();
// get node info
nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
priority = Priority.newInstance(1);
priority2 = Priority.newInstance(2);
capability = Resource.newInstance(1024, 1);
// Cache the first node's host and rack for locality-based requests.
node = nodeReports.get(0).getNodeId().getHost();
rack = nodeReports.get(0).getRackName();
nodes = new String[]{ node };
racks = new String[]{ rack };
}