This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.ApplicationId.getId. If you are wondering what ApplicationId.getId does and how to use it in practice, the curated examples below should help; you can also read further about the enclosing class, org.apache.hadoop.yarn.api.records.ApplicationId.
The following shows 7 code examples of the ApplicationId.getId method, sorted by popularity by default.
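Before the examples, a quick orientation: an ApplicationId is the pair of the ResourceManager's start timestamp and a per-cluster sequence number, and getId() returns that sequence number. A minimal sketch (the timestamp and id values below are made up for illustration):

import org.apache.hadoop.yarn.api.records.ApplicationId;

// Build an ApplicationId from a cluster timestamp and a sequence number.
ApplicationId appId = ApplicationId.newInstance(1408865520056L, 42);
appId.getId();                // 42, the per-cluster sequence number
appId.getClusterTimestamp();  // 1408865520056L, when the ResourceManager started
appId.toString();             // "application_1408865520056_0042"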
Example 1: initializeApplication
import org.apache.hadoop.yarn.api.records.ApplicationId; // the class/package this method depends on
@Override
public void initializeApplication(ApplicationInitializationContext context) {
  String user = context.getUser();
  ApplicationId appId = context.getApplicationId();
  ByteBuffer secret = context.getApplicationDataForService();
  // TODO these bytes should be versioned
  try {
    Token<JobTokenIdentifier> jt = deserializeServiceData(secret);
    // TODO: Once Shuffle is out of NM, this can use MR APIs
    JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
    recordJobShuffleInfo(jobId, user, jt);
  } catch (IOException e) {
    LOG.error("Error during initApp", e);
    // TODO add API to AuxiliaryServices to report failures
  }
}
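For context, the secret arriving via getApplicationDataForService() is whatever the submitting client attached to the AM container launch context under the shuffle service's name. A hedged sketch of that producer side, assuming the standard mapreduce_shuffle service id and a job token already in scope:

import java.nio.ByteBuffer;
import java.util.Collections;
import org.apache.hadoop.mapred.ShuffleHandler;

// Serialize the job token into the service-data bytes that the NodeManager's
// auxiliary service later receives in initializeApplication().
// amContainer is the AM's ContainerLaunchContext (assumed in scope here).
ByteBuffer serviceData = ShuffleHandler.serializeServiceData(jobToken);
amContainer.setServiceData(
    Collections.singletonMap("mapreduce_shuffle", serviceData));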
Example 2: findHistoryFilePath
import org.apache.hadoop.yarn.api.records.ApplicationId; // the class/package this method depends on
public static Optional<String> findHistoryFilePath(Iterator<LocatedFileStatus> listing,
    ApplicationId applicationId) {
  JobID jobId = new JobID(String.valueOf(applicationId.getClusterTimestamp()), applicationId.getId());
  List<LocatedFileStatus> jhistFiles = Lists.newArrayList();
  // maybe this could work more nicely with some recursive glob and a filter
  try {
    jhistFiles = StreamSupport
        .stream(Spliterators.spliteratorUnknownSize(listing, Spliterator.NONNULL), false)
        .filter(fstatus -> fstatus.getPath().toString()
            .matches(".*" + jobId.toString() + ".*.jhist"))
        .collect(Collectors.toList());
  } catch (RemoteIteratorAdaptor.WrappedRemoteIteratorException wrie) {
    // We can't really do much at this point, as this is an error from the
    // underlying hadoop filesystem implementation. But we want to at least log
    // it separately from other conditions.
    logger.error("Retrieving remote listing failed", wrie);
  }
  if (jhistFiles.size() < 1) {
    logger.error("Could not locate a history file for parameters");
    return Optional.empty();
  } else if (jhistFiles.size() > 1) {
    logger.error("Found two or more matching files, will dump first");
  }
  return jhistFiles.stream()
      .findFirst()
      .map(x -> x.getPath().toString());
}
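A hedged usage sketch for this helper: the listing would typically come from FileSystem.listFiles() on the job-history done directory, adapted from Hadoop's RemoteIterator to a plain Iterator. The adaptor's constructor and the /mr-history/done path below are assumptions, not confirmed by the example:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

// Recursively list the (assumed) history directory and search it for the
// application's .jhist file.
FileSystem fs = FileSystem.get(conf);
RemoteIterator<LocatedFileStatus> listing =
    fs.listFiles(new Path("/mr-history/done"), true);
findHistoryFilePath(new RemoteIteratorAdaptor<>(listing), applicationId)
    .ifPresent(path -> System.out.println("Found history file: " + path));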
Example 3: stopApplication
import org.apache.hadoop.yarn.api.records.ApplicationId; // the class/package this method depends on
@Override
public void stopApplication(ApplicationTerminationContext context) {
  ApplicationId appId = context.getApplicationId();
  JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
  try {
    removeJobShuffleInfo(jobId);
  } catch (IOException e) {
    LOG.error("Error during stopApp", e);
    // TODO add API to AuxiliaryServices to report failures
  }
}
Example 4: forceKillApplication
import org.apache.hadoop.yarn.api.records.ApplicationId; // the class/package this method depends on
@Override
public boolean forceKillApplication(ApplicationId applicationId)
    throws IOException {
  int jobid = applicationId.getId();
  String scancelCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_SLURM_SCANCEL,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_SLURM_SCANCEL);
  Shell.execCommand(scancelCmd, String.valueOf(jobid));
  return true;
}
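A note on the design: this SLURM integration evidently reuses getId() as the native scheduler's job id, so force-killing a YARN application reduces to one scancel call. Illustratively (job id 42 is made up):

// With the default configured command, the line above executes, e.g.:
//   scancel 42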
Example 5: submitApplication
import org.apache.hadoop.yarn.api.records.ApplicationId; // the class/package this method depends on
@Override
public void submitApplication(ApplicationSubmissionContext context)
    throws IOException {
  int waitingTime = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS);
  int noOfTimes = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX);
  ApplicationId applicationId = context.getApplicationId();
  String applicationName = context.getApplicationName();
  SocketWrapper socket = SocketCache.getSocket(applicationId.getId());
  if (socket.waitForReady(waitingTime * noOfTimes)) {
    PBSCommandExecutor.launchContainer(
        context.getAMContainerSpec(),
        ContainerId.newContainerId(
            ApplicationAttemptId.newInstance(applicationId, 1), 1L)
            .toString(),
        applicationName, conf, applicationId.getId(), true,
        socket.getContainerHostName());
  }
  // Set the PBS job name
  int jobid = applicationId.getId();
  String pbsJobName = applicationName.replaceAll("\\s", "");
  if (pbsJobName.length() > 13) {
    pbsJobName = pbsJobName.substring(0, 12);
  }
  String qalterCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_PBS_QALTER,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_PBS_QALTER);
  Shell.execCommand(qalterCmd, String.valueOf(jobid), "-N", "Y#" + pbsJobName);
}
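The name handling at the end is worth a worked example: PBS traditionally limits job names to 15 characters, which is presumably why the code truncates. Tracing the logic above with made-up application names:

// applicationName = "My Spark App"
//   replaceAll("\\s", "") -> "MySparkApp"             (10 chars, kept as-is)
// applicationName = "My Long Spark Application"
//   replaceAll("\\s", "") -> "MyLongSparkApplication" (22 chars)
//   substring(0, 12)      -> "MyLongSparkA"
//   final qalter argument -> -N "Y#MyLongSparkA"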
Example 6: forceKillApplication
import org.apache.hadoop.yarn.api.records.ApplicationId; // the class/package this method depends on
@Override
public boolean forceKillApplication(ApplicationId applicationId)
    throws IOException {
  int jobid = applicationId.getId();
  String qdelCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_PBS_QDEL,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_PBS_QDEL);
  Shell.execCommand(qdelCmd, String.valueOf(jobid));
  return true;
}
Example 7: fromYarn
import org.apache.hadoop.yarn.api.records.ApplicationId; // the class/package this method depends on
public static org.apache.hadoop.mapreduce.JobID fromYarn(ApplicationId appID) {
  String identifier = fromClusterTimeStamp(appID.getClusterTimestamp());
  return new org.apache.hadoop.mapred.JobID(identifier, appID.getId());
}
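fromClusterTimeStamp is not shown in the snippet; in Hadoop's TypeConverter, which this method appears to come from, it simply renders the timestamp as a decimal string, so the conversion mirrors the JobID construction in Examples 1 and 3:

// The missing helper, reproduced here for completeness:
private static String fromClusterTimeStamp(long clusterTimeStamp) {
  return Long.toString(clusterTimeStamp);
}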