本文整理匯總了Java中org.apache.hadoop.yarn.client.api.YarnClient.getApplicationReport方法的典型用法代碼示例。如果您正苦於以下問題:Java YarnClient.getApplicationReport方法的具體用法?Java YarnClient.getApplicationReport怎麼用?Java YarnClient.getApplicationReport使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.yarn.client.api.YarnClient
的用法示例。
在下文中一共展示了YarnClient.getApplicationReport方法的11個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: verifyApplicationState
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Checks whether the application has progressed past the submission phase.
 *
 * @param appId the application to look up
 * @return -1 while the app is still NEW, NEW_SAVING or SUBMITTED; 0 for any
 *         other state (ACCEPTED, RUNNING, or a terminal state)
 * @throws IOException   on RPC failure
 * @throws YarnException on YARN-side failure
 */
private int verifyApplicationState(ApplicationId appId) throws IOException,
YarnException {
  YarnClient yarnClient = createYarnClient();
  try {
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState state = appReport.getYarnApplicationState();
    // Still queued on the RM side — report "not yet accepted".
    boolean preAccepted = state == YarnApplicationState.NEW
        || state == YarnApplicationState.NEW_SAVING
        || state == YarnApplicationState.SUBMITTED;
    return preAccepted ? -1 : 0;
  } finally {
    // Always release the client, even if the report lookup throws.
    yarnClient.close();
  }
}
示例2: waitTillAccepted
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Blocks until the application reaches the ACCEPTED state, polling the RM
 * every 200ms.
 *
 * @param rmClient a started client used to poll the ResourceManager
 * @param appId    the application to wait for
 * @throws Exception if ACCEPTED is not reached within 20 seconds, or if
 *                   polling/sleeping fails
 */
private void waitTillAccepted(YarnClient rmClient, ApplicationId appId)
    throws Exception {
  long start = System.currentTimeMillis();
  ApplicationReport report = rmClient.getApplicationReport(appId);
  while (YarnApplicationState.ACCEPTED != report.getYarnApplicationState()) {
    if (System.currentTimeMillis() - start > 20 * 1000) {
      throw new Exception("App '" + appId +
          "' time out, failed to reach ACCEPTED state");
    }
    Thread.sleep(200);
    report = rmClient.getApplicationReport(appId);
  }
  // Fix: the previous catch (Exception ex) { throw new Exception(ex); }
  // re-wrapped every exception (including this method's own timeout
  // exception) in a useless extra layer, hiding the message. The method
  // already declares `throws Exception`, so exceptions propagate as-is.
}
示例3: waitTillAccepted
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Blocks until the application reaches the ACCEPTED state (polling every
 * 200ms, 20s timeout), then asserts that its unmanaged-AM flag matches the
 * expectation.
 *
 * @param rmClient             a started client used to poll the RM
 * @param appId                the application to wait for
 * @param unmanagedApplication expected value of {@code report.isUnmanagedApp()}
 * @throws Exception if ACCEPTED is not reached within 20 seconds
 */
private void waitTillAccepted(YarnClient rmClient, ApplicationId appId,
    boolean unmanagedApplication)
    throws Exception {
  long start = System.currentTimeMillis();
  ApplicationReport report = rmClient.getApplicationReport(appId);
  while (YarnApplicationState.ACCEPTED != report.getYarnApplicationState()) {
    if (System.currentTimeMillis() - start > 20 * 1000) {
      throw new Exception("App '" + appId +
          "' time out, failed to reach ACCEPTED state");
    }
    Thread.sleep(200);
    report = rmClient.getApplicationReport(appId);
  }
  Assert.assertEquals(unmanagedApplication, report.isUnmanagedApp());
  // Fix: removed catch (Exception ex) { throw new Exception(ex); } — it
  // wrapped every failure (timeout, assertion, interrupt) in an opaque
  // extra Exception layer with no added information. The `throws Exception`
  // declaration already lets everything propagate unchanged.
}
示例4: pollFinishedApplicationState
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Waits (polling every 250ms) until the application reaches a terminal
 * state and returns that state.
 *
 * @param client started client used to query the ResourceManager
 * @param appId  the application to watch
 * @return the terminal state: FINISHED, KILLED or FAILED
 * @throws IOException          on RPC failure
 * @throws YarnException        on YARN-side failure
 * @throws InterruptedException if interrupted while sleeping
 */
public static YarnApplicationState pollFinishedApplicationState(YarnClient client, ApplicationId appId)
    throws IOException, YarnException, InterruptedException {
  final EnumSet<YarnApplicationState> terminal = EnumSet.of(FINISHED, KILLED, FAILED);
  for (;;) {
    YarnApplicationState current =
        client.getApplicationReport(appId).getYarnApplicationState();
    if (terminal.contains(current)) {
      return current;
    }
    Thread.sleep(250);
  }
}
示例5: retrieveDiagnostics
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Fetches the diagnostics string for the given application id, failing
 * loudly if the RM returned an empty one.
 *
 * @param yarnClient    started client used to query the RM
 * @param applicationId application id in its string form
 * @return the non-empty diagnostics text
 * @throws Exception if the diagnostics are empty or the lookup fails
 */
public static String retrieveDiagnostics(YarnClient yarnClient, String applicationId) throws Exception {
  ApplicationId appId = ConverterUtils.toApplicationId(applicationId);
  String diagnostics = yarnClient.getApplicationReport(appId).getDiagnostics();
  if (!isDiagnosticsEmpty(diagnostics)) {
    return diagnostics;
  }
  throw new Exception("Retrieved Empty Diagnostics for " + applicationId);
}
示例6: getClusterSpec
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Retrieves the cluster spec published by the application master over RPC.
 *
 * @param client started client used to query the RM
 * @param appId  the application whose AM should be contacted
 * @return the parsed spec when the app is RUNNING and the AM has published
 *         one; {@code ClusterSpec.empty()} otherwise
 * @throws Exception on RM lookup, RPC, or JSON-parsing failure
 */
static ClusterSpec getClusterSpec(YarnClient client, ApplicationId appId) throws Exception {
  ClusterSpec clusterSpec = ClusterSpec.empty();
  ApplicationReport report = client.getApplicationReport(appId);
  // Fix: compare enums with == instead of .equals — idiomatic and null-safe
  // (the old state.equals(...) would NPE on a null state).
  if (report.getYarnApplicationState() == YarnApplicationState.RUNNING) {
    String hostname = report.getHost();
    int port = report.getRpcPort();
    TFApplicationRpc rpc = TFApplicationRpcClient.getInstance(hostname, port);
    String spec = rpc.getClusterSpec();
    if (spec != null) {
      clusterSpec = ClusterSpec.fromJsonString(spec);
    }
  }
  return clusterSpec;
}
示例7: getApplicationState
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Looks up the current YARN state of the application using a short-lived
 * client that is always closed before returning.
 *
 * @param appId the application to look up
 * @return the application's current {@link YarnApplicationState}
 * @throws IOException   on RPC failure
 * @throws YarnException on YARN-side failure
 */
private YarnApplicationState getApplicationState(ApplicationId appId)
    throws IOException, YarnException {
  YarnClient yarnClient = createYarnClient();
  try {
    return yarnClient.getApplicationReport(appId).getYarnApplicationState();
  } finally {
    yarnClient.close();
  }
}
示例8: getReport
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Fetches a fresh application report for {@code appId}, wrapped in the
 * Hadoop-2.1 adapter. A new client is created per call and stopped in all
 * cases.
 *
 * @return the wrapped report
 * @throws RuntimeException when the RM lookup fails (original cause attached)
 */
@Override
public YarnApplicationReport getReport() {
  YarnClient yarnClient = createYarnClient();
  try {
    ApplicationReport raw = yarnClient.getApplicationReport(appId);
    return new Hadoop21YarnApplicationReport(raw);
  } catch (YarnException | IOException e) {
    throw new RuntimeException("Failed to get application report for " + appId, e);
  } finally {
    yarnClient.stop();
  }
}
示例9: run
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Submits the ApplicationMasterKMS application master to YARN and blocks
 * until it terminates.
 *
 * @param args [0] encryption-zone paths, [1] extra classpath for the AM,
 *             [2] (optional) global KMS address — defaults to localhost:16000
 * @throws Exception on submission, polling, or configuration failure
 */
public void run(String[] args) throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  // Fix: the client was never stopped before — every run leaked an RM
  // connection. All work now happens inside try/finally.
  try {
    final String zonepaths = args[0];
    final String exclasspath = args[1];
    final String globalKMS = args.length == 2 ? "localhost:16000" : args[2];
    // Plugin URI falls back to <defaultFS-host>:<default-port> when the
    // service RPC address is not configured.
    final String pluginURI = conf.get(NuCypherExtRpcServer.NUCYPHER_EXT_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        InetSocketAddress.createUnresolved(new URI(conf.get("fs.defaultFS")).getHost(),
            NuCypherExtRpcServer.DEFAULT_PORT).toString());
    String providers = conf.get(KEY_PROVIDER_PATH);
    YarnClientApplication app = yarnClient.createApplication();
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    amContainer.setCommands(
        Collections.singletonList(
            Environment.JAVA_HOME.$$() + "/bin/java" +
            " -Xmx1024M"+
            " ApplicationMasterKMS"+
            " " + zonepaths +
            " " + exclasspath +
            " " + globalKMS +
            " " + providers +
            " " + pluginURI +
            " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" +
            " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    // Fix: removed unused locals `cryptopath` and `appMasterJar` (the
    // latter was only referenced from commented-out setup code).
    Map<String, LocalResource> localResources = new HashMap<>();
    setupExtJar(exclasspath, localResources, appId.toString());
    amContainer.setLocalResources(localResources);
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv, exclasspath);
    amContainer.setEnvironment(appMasterEnv);
    // AM container: 1 GB, 1 vcore on the default queue.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(1024);
    capability.setVirtualCores(1);
    appContext.setApplicationName("prkeyrotation");
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default");
    System.out.println("Submitting application "+appId);
    yarnClient.submitApplication(appContext);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    // Poll every 100ms until the app reaches a terminal state.
    while (appState != YarnApplicationState.FINISHED &&
        appState != YarnApplicationState.KILLED &&
        appState != YarnApplicationState.FAILED) {
      Thread.sleep(100);
      appReport = yarnClient.getApplicationReport(appId);
      appState = appReport.getYarnApplicationState();
    }
    System.out.println("Application " + appId + " finished with " +
        " state " + appState +
        " at " + appReport.getFinishTime());
  } finally {
    yarnClient.stop();
  }
}
示例10: run
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Submits the DMLC application master to YARN, waits for it to terminate,
 * and on failure prints diagnostics plus the available queues and kills
 * the application.
 *
 * @param args command-line options followed by the commands to run; prints
 *             usage and returns when empty
 * @throws Exception on submission, polling, or token-setup failure
 */
private void run(String[] args) throws Exception {
  if (args.length == 0) {
    System.out.println("Usage: [options] [commands..]");
    System.out.println("options: [-file filename] [-appcp appClasspath]");
    return;
  }
  this.initArgs(args);
  // Create yarnClient
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  // Fix: the client was never stopped before — the RM connection leaked on
  // every invocation. The submission/wait logic now runs in try/finally.
  try {
    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records
        .newRecord(ContainerLaunchContext.class);
    ApplicationSubmissionContext appContext = app
        .getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    // Ctrl-C kills the submitted application instead of orphaning it.
    CtrlCHandler handler = new CtrlCHandler(appId, yarnClient);
    Signal intSignal = new Signal("INT");
    Signal.handle(intSignal, handler);
    // setup security token
    amContainer.setTokens(this.setupTokens());
    // setup cache-files and environment variables
    amContainer.setLocalResources(this.setupCacheFiles(appId));
    amContainer.setEnvironment(this.getEnvironment());
    String cmd = Environment.JAVA_HOME.$$() + "/bin/java"
        + " -Xmx900m"
        + " org.apache.hadoop.yarn.dmlc.ApplicationMaster"
        + this.cacheFileArg + ' ' + this.appArgs + " 1>"
        + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
        + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
    LOG.debug(cmd);
    amContainer.setCommands(Collections.singletonList(cmd));
    // AM resource requirements: 1 GB, 1 vcore.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(1024);
    capability.setVirtualCores(1);
    LOG.info("jobname=" + this.jobName + ",username=" + this.userName);
    appContext.setApplicationName(jobName + ":DMLC-YARN");
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue(queue);
    LOG.info("Submitting application " + appId);
    yarnClient.submitApplication(appContext);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    // Poll every 100ms until the app reaches a terminal state.
    while (appState != YarnApplicationState.FINISHED
        && appState != YarnApplicationState.KILLED
        && appState != YarnApplicationState.FAILED) {
      Thread.sleep(100);
      appReport = yarnClient.getApplicationReport(appId);
      appState = appReport.getYarnApplicationState();
    }
    System.out.println("Application " + appId + " finished with"
        + " state " + appState + " at " + appReport.getFinishTime());
    if (!appReport.getFinalApplicationStatus().equals(
        FinalApplicationStatus.SUCCEEDED)) {
      System.err.println(appReport.getDiagnostics());
      System.out.println("Available queues:");
      for (QueueInfo q : yarnClient.getAllQueues()) {
        System.out.println(q.getQueueName());
      }
      yarnClient.killApplication(appId);
    }
  } finally {
    yarnClient.stop();
  }
}
示例11: filterAppsByAggregatedStatus
import org.apache.hadoop.yarn.client.api.YarnClient; //導入方法依賴的package包/類
/**
 * Removes from {@code eligibleApplications} every app whose log aggregation
 * has not completed (RUNNING, RUNNING_WITH_FAILURE, NOT_START, DISABLED or
 * FAILED). Apps no longer known to the ResourceManager are assumed finished
 * and kept; kept apps get their finish time recorded from the report.
 *
 * @throws IOException   on RPC failure
 * @throws YarnException on YARN-side failure
 */
@VisibleForTesting
void filterAppsByAggregatedStatus() throws IOException, YarnException {
  YarnClient client = YarnClient.createYarnClient();
  try {
    client.init(getConf());
    client.start();
    for (Iterator<AppInfo> it = eligibleApplications.iterator();
        it.hasNext();) {
      AppInfo app = it.next();
      try {
        ApplicationReport report = client.getApplicationReport(
            ConverterUtils.toApplicationId(app.getAppId()));
        LogAggregationStatus aggStatus = report.getLogAggregationStatus();
        // Fix: replaced a five-way .equals() chain with an enum switch.
        switch (aggStatus) {
          case RUNNING:
          case RUNNING_WITH_FAILURE:
          case NOT_START:
          case DISABLED:
          case FAILED:
            if (verbose) {
              LOG.info("Skipping " + app.getAppId() +
                  " due to aggregation status being " + aggStatus);
            }
            it.remove();
            break;
          default:
            if (verbose) {
              LOG.info(app.getAppId() + " has aggregation status " + aggStatus);
            }
            app.setFinishTime(report.getFinishTime());
        }
      } catch (ApplicationNotFoundException e) {
        // Assume the aggregation has finished
        if (verbose) {
          LOG.info(app.getAppId() + " not in the ResourceManager");
        }
      }
    }
  } finally {
    // Fix: removed the dead `if (client != null)` guard — `client` is
    // assigned before the try block and can never be null here.
    client.stop();
  }
}