This article collects typical usage examples of the Java method org.apache.hadoop.yarn.client.api.YarnClient.killApplication. If you are wondering what YarnClient.killApplication does, how to call it, or simply want to see it in action, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.yarn.client.api.YarnClient.
Seven code examples of YarnClient.killApplication are shown below, ordered roughly by popularity.
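Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: create a YarnClient, init and start it against a YARN configuration, call killApplication with the target ApplicationId, and stop the client. This is only an illustrative sketch; the class name KillApplicationSketch and the helper killByIdString are made up for this article, and ApplicationId.fromString assumes a newer Hadoop release (older versions use ConverterUtils.toApplicationId instead, as in Example 1 below).

import java.io.IOException;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class KillApplicationSketch {

  // Illustrative helper (not from any of the examples below): kills the application
  // whose id string looks like "application_<clusterTimestamp>_<sequenceNumber>".
  public static void killByIdString(String applicationIdString)
      throws YarnException, IOException {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration()); // picks up yarn-site.xml from the classpath
    yarnClient.start();
    try {
      // ApplicationId.fromString assumes a newer Hadoop release; older releases
      // use ConverterUtils.toApplicationId instead (see Example 1).
      ApplicationId appId = ApplicationId.fromString(applicationIdString);
      yarnClient.killApplication(appId); // asks the ResourceManager to force-kill the app
    } finally {
      yarnClient.stop();
    }
  }

  public static void main(String[] args) throws Exception {
    killByIdString(args[0]);
  }
}

Conceptually this is the same operation as running yarn application -kill <applicationId> on the command line: the ResourceManager force-kills the application and its report eventually moves to the KILLED state.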
Example 1: killApplication
import org.apache.hadoop.yarn.client.api.YarnClient; // import the package/class the method depends on
public static void killApplication(String applicationId) throws Exception {
  try {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    // Equivalent to the CLI command: yarn application -kill <applicationId>
    LOGGER.logInfo("[yarn application -kill %s]", applicationId);
    yarnClient.killApplication(ConverterUtils.toApplicationId(applicationId));
    yarnClient.stop();
  } catch (ApplicationNotFoundException ignored) {
    // The application no longer exists, so there is nothing to kill.
  } catch (Exception e) {
    if (e.getMessage().toLowerCase().contains("invalid applicationid")) {
      // A malformed application id is likewise ignored.
    } else {
      throw e;
    }
  }
}
Example 2: testKillApplication
import org.apache.hadoop.yarn.client.api.YarnClient; // import the package/class the method depends on
@Test
public void testKillApplication() throws Exception {
  MockRM rm = new MockRM();
  rm.start();
  RMApp app = rm.submitApp(2000);

  Configuration conf = new Configuration();
  @SuppressWarnings("resource")
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();

  client.killApplication(app.getApplicationId());
  verify(((MockYarnClient) client).getRMClient(), times(2))
      .forceKillApplication(any(KillApplicationRequest.class));
}
Example 3: killJobOnCluster
import org.apache.hadoop.yarn.client.api.YarnClient; // import the package/class the method depends on
public static void killJobOnCluster(String applicationId, Logger log)
    throws YarnException, IOException {
  YarnConfiguration yarnConf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(yarnConf);
  yarnClient.start();
  // applicationId has the form "application_<clusterTimestamp>_<sequenceNumber>"
  String[] split = applicationId.split("_", -1);
  ApplicationId aid = ApplicationId.newInstance(Long.parseLong(split[1]),
      Integer.parseInt(split[2]));
  yarnClient.killApplication(aid);
  log.info("successfully killed application: " + aid);
  yarnClient.close();
}
Example 4: cancel
import org.apache.hadoop.yarn.client.api.YarnClient; // import the package/class the method depends on
@Override
public void cancel() {
  YarnClient yarnClient = createYarnClient();
  try {
    yarnClient.killApplication(appId);
  } catch (YarnException | IOException e) {
    throw new RuntimeException("Failed to kill application " + appId, e);
  } finally {
    yarnClient.stop();
  }
}
Example 5: failSessionDuringDeployment
import org.apache.hadoop.yarn.client.api.YarnClient; // import the package/class the method depends on
/**
 * Kills the YARN application and stops the YARN client.
 *
 * <p>Use this method to kill the application before it has been properly deployed.
 */
private void failSessionDuringDeployment(YarnClient yarnClient, YarnClientApplication yarnApplication) {
  LOG.info("Killing YARN application");
  try {
    yarnClient.killApplication(yarnApplication.getNewApplicationResponse().getApplicationId());
  } catch (Exception e) {
    // We only log a debug message here because the "killApplication" call is a best-effort
    // call (we don't know whether the application had been deployed when the error occurred).
    LOG.debug("Error while killing YARN application", e);
  }
  yarnClient.stop();
}
Example 6: run
import org.apache.hadoop.yarn.client.api.YarnClient; // import the package/class the method depends on
private void run(String[] args) throws Exception {
  if (args.length == 0) {
    System.out.println("Usage: [options] [commands..]");
    System.out.println("options: [-file filename] [-appcp appClasspath]");
    return;
  }
  this.initArgs(args);
  // Create yarnClient
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  // Create application via yarnClient
  YarnClientApplication app = yarnClient.createApplication();
  // Set up the container launch context for the application master
  ContainerLaunchContext amContainer = Records
      .newRecord(ContainerLaunchContext.class);
  ApplicationSubmissionContext appContext = app
      .getApplicationSubmissionContext();
  // Get the ApplicationId assigned to this submission
  ApplicationId appId = appContext.getApplicationId();
  // add ctrl+c signal handler
  CtrlCHandler handler = new CtrlCHandler(appId, yarnClient);
  Signal intSignal = new Signal("INT");
  Signal.handle(intSignal, handler);
  // setup security token
  amContainer.setTokens(this.setupTokens());
  // setup cache-files and environment variables
  amContainer.setLocalResources(this.setupCacheFiles(appId));
  amContainer.setEnvironment(this.getEnvironment());
  String cmd = Environment.JAVA_HOME.$$() + "/bin/java"
      + " -Xmx900m"
      + " org.apache.hadoop.yarn.dmlc.ApplicationMaster"
      + this.cacheFileArg + ' ' + this.appArgs + " 1>"
      + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
      + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
  LOG.debug(cmd);
  amContainer.setCommands(Collections.singletonList(cmd));
  // Set up resource type requirements for ApplicationMaster
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(1024);
  capability.setVirtualCores(1);
  LOG.info("jobname=" + this.jobName + ",username=" + this.userName);
  appContext.setApplicationName(jobName + ":DMLC-YARN");
  appContext.setAMContainerSpec(amContainer);
  appContext.setResource(capability);
  appContext.setQueue(queue);
  // appContext.setUser(userName);
  LOG.info("Submitting application " + appId);
  yarnClient.submitApplication(appContext);
  ApplicationReport appReport = yarnClient.getApplicationReport(appId);
  YarnApplicationState appState = appReport.getYarnApplicationState();
  while (appState != YarnApplicationState.FINISHED
      && appState != YarnApplicationState.KILLED
      && appState != YarnApplicationState.FAILED) {
    Thread.sleep(100);
    appReport = yarnClient.getApplicationReport(appId);
    appState = appReport.getYarnApplicationState();
  }
  System.out.println("Application " + appId + " finished with"
      + " state " + appState + " at " + appReport.getFinishTime());
  if (!appReport.getFinalApplicationStatus().equals(
      FinalApplicationStatus.SUCCEEDED)) {
    System.err.println(appReport.getDiagnostics());
    System.out.println("Available queues:");
    for (QueueInfo q : yarnClient.getAllQueues()) {
      System.out.println(q.getQueueName());
    }
    yarnClient.killApplication(appId);
  }
}
Example 7: testDetachedMode
import org.apache.hadoop.yarn.client.api.YarnClient; // import the package/class the method depends on
/**
 * Test regular operation, including command line parameter parsing.
 */
@Test(timeout = 60000) // timeout after a minute.
public void testDetachedMode() throws InterruptedException, IOException {
  LOG.info("Starting testDetachedMode()");
  addTestAppender(FlinkYarnSessionCli.class, Level.INFO);
  Runner runner =
      startWithArgs(new String[]{"-j", flinkUberjar.getAbsolutePath(),
              "-t", flinkLibFolder.getAbsolutePath(),
              "-n", "1",
              "-jm", "768",
              "-tm", "1024",
              "--name", "MyCustomName", // test setting a custom name
              "--detached"},
          "Flink JobManager is now running on", RunTypes.YARN_SESSION);

  // before checking any strings outputted by the CLI, first give it time to return
  runner.join();
  checkForLogString("The Flink YARN client has been started in detached mode");

  LOG.info("Waiting until two containers are running");
  // wait until two containers are running
  while (getRunningContainers() < 2) {
    sleep(500);
  }

  // additional sleep for the JM/TM to start and establish connection
  sleep(2000);

  LOG.info("Two containers are running. Killing the application");
  // kill application "externally".
  try {
    YarnClient yc = YarnClient.createYarnClient();
    yc.init(YARN_CONFIGURATION);
    yc.start();
    List<ApplicationReport> apps = yc.getApplications(EnumSet.of(YarnApplicationState.RUNNING));
    Assert.assertEquals(1, apps.size()); // Only one running
    ApplicationReport app = apps.get(0);

    Assert.assertEquals("MyCustomName", app.getName());
    ApplicationId id = app.getApplicationId();
    yc.killApplication(id);

    while (yc.getApplications(EnumSet.of(YarnApplicationState.KILLED)).size() == 0) {
      sleep(500);
    }
  } catch (Throwable t) {
    LOG.warn("Killing failed", t);
    Assert.fail();
  } finally {
    // cleanup the yarn-properties file
    String confDirPath = System.getenv("FLINK_CONF_DIR");
    File configDirectory = new File(confDirPath);
    LOG.info("testDetachedPerJobYarnClusterInternal: Using configuration directory " + configDirectory.getAbsolutePath());

    // load the configuration
    LOG.info("testDetachedPerJobYarnClusterInternal: Trying to load configuration file");
    Configuration configuration = GlobalConfiguration.loadConfiguration(configDirectory.getAbsolutePath());

    try {
      File yarnPropertiesFile = FlinkYarnSessionCli.getYarnPropertiesLocation(configuration.getString(YarnConfigOptions.PROPERTIES_FILE_LOCATION));
      if (yarnPropertiesFile.exists()) {
        LOG.info("testDetachedPerJobYarnClusterInternal: Cleaning up temporary Yarn address reference: {}", yarnPropertiesFile.getAbsolutePath());
        yarnPropertiesFile.delete();
      }
    } catch (Exception e) {
      LOG.warn("testDetachedPerJobYarnClusterInternal: Exception while deleting the JobManager address file", e);
    }
  }
  LOG.info("Finished testDetachedMode()");
}