本文整理汇总了Java中org.apache.hadoop.yarn.api.ApplicationConstants.LOG_DIR_EXPANSION_VAR属性的典型用法代码示例。如果您正苦于以下问题:Java ApplicationConstants.LOG_DIR_EXPANSION_VAR属性的具体用法?Java ApplicationConstants.LOG_DIR_EXPANSION_VAR怎么用?Java ApplicationConstants.LOG_DIR_EXPANSION_VAR使用的例子?那么，这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类org.apache.hadoop.yarn.api.ApplicationConstants
的用法示例。
在下文中一共展示了ApplicationConstants.LOG_DIR_EXPANSION_VAR属性的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: makeContainerCommand
/**
 * Builds the shell command used to start a TensorFlow server process
 * ({@link TFServerRunner}) inside an allocated YARN container.
 *
 * @param containerMemory JVM heap limit for the server, in MB (-Xmx)
 * @param clusterSpec serialized TensorFlow cluster specification
 * @param jobName TensorFlow job name (e.g. "ps" or "worker")
 * @param taskIndex index of this task within its job
 * @return the full command string, with stdout/stderr redirected into the
 *         container log directory ({@code <LOG_DIR>} is expanded by the
 *         NodeManager at container launch time)
 */
private String makeContainerCommand(long containerMemory, String clusterSpec,
    String jobName, int taskIndex) {
  String[] commands = new String[]{
      ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java",
      "-Xmx" + containerMemory + "m",
      // Fix: the original appended a trailing space to the class name, which
      // produced a double space once the parts were joined with " ".
      TFServerRunner.class.getName(),
      Utils.mkOption(Constants.OPT_CLUSTER_SPEC, clusterSpec),
      Utils.mkOption(Constants.OPT_JOB_NAME, jobName),
      Utils.mkOption(Constants.OPT_TASK_INDEX, taskIndex),
      "1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/TFServerRunner." +
          ApplicationConstants.STDOUT,
      "2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/TFServerRunner." +
          ApplicationConstants.STDERR
  };
  return Utils.mkString(commands, " ");
}
示例2: makeAppMasterCommand
/**
 * Builds the command line that launches the ApplicationMaster JVM, using the
 * configured AM memory for -Xmx and forwarding the container/TensorFlow
 * settings as command-line options.
 *
 * @param tfLib path of the TensorFlow native library to hand to the AM
 * @param tfJar path of the TensorFlow job jar to hand to the AM
 * @return the full shell command, with stdout/stderr redirected into the
 *         container log directory ({@code <LOG_DIR>} is expanded by the
 *         NodeManager at container launch time)
 */
private String makeAppMasterCommand(String tfLib, String tfJar) {
String[] commands = new String[]{
ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java",
// Set Xmx based on am memory size
"-Xmx" + amMemory + "m",
// Set class name
ApplicationMaster.class.getName(),
// Container sizing and job topology are forwarded as CLI options.
Utils.mkOption(Constants.OPT_TF_CONTAINER_MEMORY, containerMemory),
Utils.mkOption(Constants.OPT_TF_CONTAINER_VCORES, containerVCores),
Utils.mkOption(Constants.OPT_TF_WORKER_NUM, workerNum),
Utils.mkOption(Constants.OPT_TF_PS_NUM, psNum),
Utils.mkOption(Constants.OPT_TF_LIB, tfLib),
Utils.mkOption(Constants.OPT_TF_JAR, tfJar),
"1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout",
"2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr"
};
return Utils.mkString(commands, " ");
}
示例3: testEnvExpansion
@Test(timeout = 10000)
public void testEnvExpansion() throws IOException {
  // Simulated container log directory that <LOG_DIR> should expand to.
  final Path logDir = new Path("/nm/container/logs");
  final String sep = ApplicationConstants.CLASS_PATH_SEPARATOR;
  // Classpath-style string mixing cross-platform env vars with <LOG_DIR>.
  final String raw =
      Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/*"
          + sep
          + Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/lib/*"
          + sep
          + Apps.crossPlatformify("HADOOP_LOG_HOME")
          + ApplicationConstants.LOG_DIR_EXPANSION_VAR;
  final String expanded = ContainerLaunch.expandEnvironment(raw, logDir);
  // Env-var syntax and the path separator differ between Windows and Unix.
  final String expected = Shell.WINDOWS
      ? "%HADOOP_HOME%/share/hadoop/common/*;"
          + "%HADOOP_HOME%/share/hadoop/common/lib/*;"
          + "%HADOOP_LOG_HOME%/nm/container/logs"
      : "$HADOOP_HOME/share/hadoop/common/*:"
          + "$HADOOP_HOME/share/hadoop/common/lib/*:"
          + "$HADOOP_LOG_HOME/nm/container/logs";
  Assert.assertEquals(expected, expanded);
  System.out.println(expanded);
}
示例4: launchDummyTask
/**
 * Launches a trivial placeholder task ("./launcher.py") in the given
 * container, redirecting its stdout/stderr into the YARN container log
 * directory ({@code <LOG_DIR>} is expanded by the NodeManager).
 *
 * @param container the container allocated by the ResourceManager
 */
private synchronized void launchDummyTask(Container container){
  ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
  String new_command = "./launcher.py";
  String cmd = new_command + " 1>"
      + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
      + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
      + "/stderr";
  ctx.setCommands(Collections.singletonList(cmd));
  ctx.setTokens(setupTokens());
  ctx.setLocalResources(this.workerResources);
  // Fix: the method is already synchronized on "this", so the original
  // nested synchronized (this) block was a redundant re-entrant lock.
  this.nmClient.startContainerAsync(container, ctx);
}
示例5: run
/**
 * Creates and submits a YARN application that runs the Hyracks application
 * master. The AM distribution zip is shipped as an archive local resource
 * and started via its launch script, with stdout/stderr redirected into the
 * container log directory.
 *
 * @throws Exception on RM connection, localization, or submission failures
 */
private void run() throws Exception {
  Configuration hadoopConf = new Configuration();
  YarnConfiguration yarnConf = new YarnConfiguration(hadoopConf);
  YarnClientRMConnection rmConnection = new YarnClientRMConnection(yarnConf);
  YarnApplication application = rmConnection.createApplication(options.appName);
  ContainerLaunchContext launchContext = application.getContainerLaunchContext();

  // Ship the AM zip; YARN unpacks it under the resource name "archive".
  File amArchive = new File(System.getProperty("basedir")
      + "/hyracks-yarn-am/hyracks-yarn-am.zip");
  Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
  resources.put("archive", LocalResourceHelper.createArchiveResource(hadoopConf, amArchive));
  launchContext.setLocalResources(resources);

  // The start script lives inside the unpacked archive; <LOG_DIR> is
  // expanded by the NodeManager at container launch time.
  String amCommand = "./archive/bin/hyracks-yarn-am 1>"
      + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
      + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
  List<String> commandList = new ArrayList<String>();
  commandList.add(amCommand);
  launchContext.setCommands(commandList);

  launchContext.setResource(ResourceHelper.createMemoryCapability(options.amMemory));
  application.submit();
}
示例6: YarnSubmissionHelper
/**
 * Creates a helper that submits a REEF driver (the YARN application master).
 * Initializes and starts a YarnClient, requests a new application id from the
 * ResourceManager, and stores the submission context used by later calls.
 *
 * @param yarnConfiguration YARN configuration used to initialize the client
 * @param fileNames supplies driver stdout/stderr file names and the driver
 *                  configuration path
 * @param classpath provides the launch classpath for the driver
 * @param yarnProxyUser proxy user on whose behalf the driver is submitted
 * @param tokenProvider source of security tokens for the submission
 * @param isUnmanaged whether the AM runs unmanaged (outside the cluster)
 * @param commandPrefixList commands to prepend to the driver launch command
 * @throws IOException on client/token setup failures
 * @throws YarnException if the ResourceManager rejects the application request
 */
public YarnSubmissionHelper(final YarnConfiguration yarnConfiguration,
final REEFFileNames fileNames,
final ClasspathProvider classpath,
final YarnProxyUser yarnProxyUser,
final SecurityTokenProvider tokenProvider,
final boolean isUnmanaged,
final List<String> commandPrefixList) throws IOException, YarnException {
this.classpath = classpath;
this.yarnProxyUser = yarnProxyUser;
this.isUnmanaged = isUnmanaged;
// Driver stdout/stderr are written into the container log directory;
// <LOG_DIR> is expanded by the NodeManager at container launch time.
this.driverStdoutFilePath =
ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" + fileNames.getDriverStdoutFileName();
this.driverStderrFilePath =
ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" + fileNames.getDriverStderrFileName();
LOG.log(Level.FINE, "Initializing YARN Client");
this.yarnClient = YarnClient.createYarnClient();
this.yarnClient.init(yarnConfiguration);
this.yarnClient.start();
LOG.log(Level.FINE, "Initialized YARN Client");
LOG.log(Level.FINE, "Requesting Application ID from YARN.");
// The application id is obtained up front; the actual submission happens
// later through the stored submission context.
final YarnClientApplication yarnClientApplication = this.yarnClient.createApplication();
this.applicationResponse = yarnClientApplication.getNewApplicationResponse();
this.applicationSubmissionContext = yarnClientApplication.getApplicationSubmissionContext();
this.applicationSubmissionContext.setUnmanagedAM(isUnmanaged);
this.applicationId = this.applicationSubmissionContext.getApplicationId();
this.tokenProvider = tokenProvider;
this.commandPrefixList = commandPrefixList;
this.configurationFilePaths = Collections.singletonList(fileNames.getDriverConfigurationPath());
LOG.log(Level.INFO, "YARN Application ID: {0}", this.applicationId);
}
示例7: getTaskLogFile
/**
 * Returns the container-relative path of the requested task log file:
 * {@code <LOG_DIR>/<filter>}, expanded by the NodeManager at launch time.
 */
private static String getTaskLogFile(LogName filter) {
  final String logDir = ApplicationConstants.LOG_DIR_EXPANSION_VAR;
  return logDir + Path.SEPARATOR + filter.toString();
}
示例8: getTaskLogFile
/**
 * Builds the path of the given log stream underneath the YARN container log
 * directory placeholder ({@code <LOG_DIR>}).
 */
private static String getTaskLogFile(LogName filter) {
  return ApplicationConstants.LOG_DIR_EXPANSION_VAR
      .concat(Path.SEPARATOR)
      .concat(filter.toString());
}
示例9: run
/**
 * Submits the DMLC ApplicationMaster to YARN, waits for the application to
 * reach a terminal state, and on failure prints diagnostics plus the list of
 * available queues and kills the application.
 *
 * @param args command-line arguments; usage is printed and the method returns
 *             early when empty
 * @throws Exception on client/RM communication errors or interruption
 */
private void run(String[] args) throws Exception {
if (args.length == 0) {
System.out.println("Usage: [options] [commands..]");
System.out.println("options: [-file filename] [-appcp appClasspath]");
return;
}
this.initArgs(args);
// Create yarnClient
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();
// Create application via yarnClient
YarnClientApplication app = yarnClient.createApplication();
// Set up the container launch context for the application master
ContainerLaunchContext amContainer = Records
.newRecord(ContainerLaunchContext.class);
ApplicationSubmissionContext appContext = app
.getApplicationSubmissionContext();
// Submit application
ApplicationId appId = appContext.getApplicationId();
//add ctrl+c signal handler
// NOTE(review): sun.misc.Signal is a JDK-internal, unsupported API.
CtrlCHandler handler = new CtrlCHandler(appId, yarnClient);
Signal intSignal = new Signal("INT");
Signal.handle(intSignal, handler);
// setup security token
amContainer.setTokens(this.setupTokens());
// setup cache-files and environment variables
amContainer.setLocalResources(this.setupCacheFiles(appId));
amContainer.setEnvironment(this.getEnvironment());
// AM launch command; stdout/stderr are redirected into the container log
// directory (<LOG_DIR> is expanded by the NodeManager at launch time).
String cmd = Environment.JAVA_HOME.$$() + "/bin/java"
+ " -Xmx900m"
+ " org.apache.hadoop.yarn.dmlc.ApplicationMaster"
+ this.cacheFileArg + ' ' + this.appArgs + " 1>"
+ ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
+ " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
LOG.debug(cmd);
amContainer.setCommands(Collections.singletonList(cmd));
// Set up resource type requirements for ApplicationMaster
Resource capability = Records.newRecord(Resource.class);
capability.setMemory(1024);
capability.setVirtualCores(1);
LOG.info("jobname=" + this.jobName + ",username=" + this.userName);
appContext.setApplicationName(jobName + ":DMLC-YARN");
appContext.setAMContainerSpec(amContainer);
appContext.setResource(capability);
appContext.setQueue(queue);
//appContext.setUser(userName);
LOG.info("Submitting application " + appId);
yarnClient.submitApplication(appContext);
// Poll the RM every 100 ms until the application reaches a terminal state.
ApplicationReport appReport = yarnClient.getApplicationReport(appId);
YarnApplicationState appState = appReport.getYarnApplicationState();
while (appState != YarnApplicationState.FINISHED
&& appState != YarnApplicationState.KILLED
&& appState != YarnApplicationState.FAILED) {
Thread.sleep(100);
appReport = yarnClient.getApplicationReport(appId);
appState = appReport.getYarnApplicationState();
}
System.out.println("Application " + appId + " finished with"
+ " state " + appState + " at " + appReport.getFinishTime());
// On anything but SUCCEEDED: surface diagnostics, list the queues the user
// could have submitted to, and make sure the application is killed.
if (!appReport.getFinalApplicationStatus().equals(
FinalApplicationStatus.SUCCEEDED)) {
System.err.println(appReport.getDiagnostics());
System.out.println("Available queues:");
for (QueueInfo q : yarnClient.getAllQueues()) {
System.out.println(q.getQueueName());
}
yarnClient.killApplication(appId);
}
}
示例10: setupApplicationMasterContainer
/**
 * Creates the ContainerLaunchContext for the Flink JobManager (application
 * master), assembling its start command from the configurable command
 * template.
 *
 * @param yarnClusterEntrypoint fully-qualified main class of the AM
 * @param hasLogback whether a logback config file is shipped
 * @param hasLog4j whether a log4j config file is shipped
 * @param hasKrb5 whether a krb5.conf is shipped (secure MiniCluster tests)
 * @param jobManagerMemoryMb container memory used to derive the JVM heap
 * @return the launch context carrying the assembled AM start command
 */
protected ContainerLaunchContext setupApplicationMasterContainer(
    String yarnClusterEntrypoint,
    boolean hasLogback,
    boolean hasLog4j,
    boolean hasKrb5,
    int jobManagerMemoryMb) {
  // ------------------ Prepare Application Master Container ------------------------------
  // Custom JVM options from the YAML file: common options first, then the
  // JobManager-specific ones when present.
  String jvmOpts = flinkConfiguration.getString(CoreOptions.FLINK_JVM_OPTIONS);
  final String jmJvmOpts = flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS);
  if (jmJvmOpts.length() > 0) {
    jvmOpts = jvmOpts + " " + jmJvmOpts;
  }
  // Secure YarnMiniCluster test runs ship krb5.conf as a local resource.
  if (hasKrb5) {
    jvmOpts = jvmOpts + " -Djava.security.krb5.conf=krb5.conf";
  }

  // Logging options are only set when a logging config was shipped.
  String logging = "";
  if (hasLogback || hasLog4j) {
    logging = "-Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.log\"";
    if (hasLogback) {
      logging += " -Dlogback.configurationFile=file:" + CONFIG_FILE_LOGBACK_NAME;
    }
    if (hasLog4j) {
      logging += " -Dlog4j.configuration=file:" + CONFIG_FILE_LOG4J_NAME;
    }
  }

  // Values substituted into the start-command template.
  final Map<String, String> commandParts = new HashMap<>();
  commandParts.put("java", "$JAVA_HOME/bin/java");
  commandParts.put("jvmmem", "-Xmx" +
      Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration) +
      "m");
  commandParts.put("jvmopts", jvmOpts);
  commandParts.put("logging", logging);
  commandParts.put("class", yarnClusterEntrypoint);
  commandParts.put("redirects",
      "1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " +
      "2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err");
  commandParts.put("args", "");

  final String template = flinkConfiguration.getString(
      ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE,
      ConfigConstants.DEFAULT_YARN_CONTAINER_START_COMMAND_TEMPLATE);
  final String amCommand = BootstrapTools.getStartCommand(template, commandParts);

  final ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
  amContainer.setCommands(Collections.singletonList(amCommand));
  LOG.debug("Application Master start command: " + amCommand);
  return amContainer;
}
示例11: setupApplicationMasterContainer
/**
 * Builds the launch context for the Flink application master container.
 * The JVM heap is derived from the configured JobManager memory; logging
 * options are added only when a logging config file was shipped. The
 * Hopsworks parameters are appended to the JVM command line (with single
 * quotes stripped), and stdout/stderr are redirected into the container log
 * directory.
 *
 * @param hasLogback whether a logback config file is shipped
 * @param hasLog4j whether a log4j config file is shipped
 * @return the launch context carrying the assembled AM start command
 */
protected ContainerLaunchContext setupApplicationMasterContainer(
    boolean hasLogback, boolean hasLog4j) {
  // ------------------ Prepare Application Master Container ------------------------------
  // respect custom JVM options in the YAML file
  final String javaOpts = flinkConfiguration.getString(
      ConfigConstants.FLINK_JVM_OPTIONS, "");
  ContainerLaunchContext amContainer = Records.newRecord(
      ContainerLaunchContext.class);

  final StringBuilder cmd = new StringBuilder("$JAVA_HOME/bin/java");
  cmd.append(" -Xmx")
      .append(Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration))
      .append("M ")
      .append(javaOpts);
  if (hasLogback || hasLog4j) {
    cmd.append(" -Dlog.file=\"")
        .append(ApplicationConstants.LOG_DIR_EXPANSION_VAR)
        .append("/jobmanager.log\"");
    if (hasLogback) {
      cmd.append(" -Dlogback.configurationFile=file:").append(CONFIG_FILE_LOGBACK_NAME);
    }
    if (hasLog4j) {
      cmd.append(" -Dlog4j.configuration=file:").append(CONFIG_FILE_LOG4J_NAME);
    }
  }
  // Hopsworks properties are appended to the command line (not to the
  // container environment), with any single quotes removed.
  for (String hopsworksParam : hopsworksParams) {
    cmd.append(' ').append(hopsworksParam.replace("\'", ""));
  }
  cmd.append(' ')
      .append(getApplicationMasterClass().getName())
      .append(' ')
      .append(" 1>")
      .append(ApplicationConstants.LOG_DIR_EXPANSION_VAR)
      .append("/jobmanager.out")
      .append(" 2>")
      .append(ApplicationConstants.LOG_DIR_EXPANSION_VAR)
      .append("/jobmanager.err");

  final String amCommand = cmd.toString();
  amContainer.setCommands(Collections.singletonList(amCommand));
  LOG.debug("Application Master start command: " + amCommand);
  return amContainer;
}
示例12: run
/**
 * Sets up and asynchronously starts the container that executes the model
 * {@link Runner}: localizes the model script and app jar, extends the
 * container CLASSPATH with the working directory and this process's own
 * classpath, and launches the Runner JVM with stdout/stderr redirected into
 * the container log directory.
 */
@Override
public void run() {
  LOG.info("Setting up container launch container for containerid="
      + container.getId());
  // Set the local resources
  Map<String, LocalResource> localResources = new HashMap<>();
  LOG.info("Local Directory Contents");
  for(File f : new File(".").listFiles()) {
    LOG.info(" " + f.length() + " - " + f.getName() );
  }
  LOG.info("Localizing " + request.getPath());
  String modelScript = localizeResources(localResources, new Path(request.getPath()), appJarLocation);
  for(Map.Entry<String, LocalResource> entry : localResources.entrySet()) {
    LOG.info(entry.getKey() + " localized: " + entry.getValue().getResource() );
  }
  // The container for the eventual shell commands needs its own local
  // resources too.
  // In this scenario, if a shell script is specified, we need to have it
  // copied and made available to the container.
  // Set the necessary command to execute on the allocated container
  Map<String, String> env = new HashMap<>();
  // Keep $CLASSPATH (hadoop-provided jars) and add the working directory
  // plus this launcher's own classpath so Runner and its deps resolve.
  // By default, all the hadoop specific classpaths will already be available
  // in $CLASSPATH, so we should be careful not to overwrite it.
  // Fix: use StringBuilder instead of StringBuffer — this is a purely local,
  // single-threaded string build, so the synchronized buffer was wasted.
  StringBuilder classPathEnv = new StringBuilder("$CLASSPATH:./*:");
  classPathEnv.append(System.getProperty("java.class.path"));
  env.put("CLASSPATH", classPathEnv.toString());
  // Construct the command to be executed on the launched container;
  // <LOG_DIR> is expanded by the NodeManager at container launch time.
  String command = ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java "
      + Runner.class.getName() + " "
      + RunnerOptions.toArgs(RunnerOptions.CONTAINER_ID.of(container.getId().getContainerId() + "")
                            ,RunnerOptions.ZK_QUORUM.of(zkQuorum)
                            ,RunnerOptions.ZK_ROOT.of(zkRoot)
                            ,RunnerOptions.SCRIPT.of(modelScript)
                            ,RunnerOptions.NAME.of(request.getName())
                            ,RunnerOptions.HOSTNAME.of(containerHostname())
                            ,RunnerOptions.VERSION.of(request.getVersion())
                            )
      + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
      + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
  List<String> commands = new ArrayList<String>();
  LOG.info("Executing container command: " + command);
  commands.add(command);
  // Set up ContainerLaunchContext, setting local resource, environment,
  // command and token for constructor.
  // Note for tokens: Set up tokens for the container too. Today, for normal
  // shell commands, the container in distribute-shell doesn't need any
  // tokens. We are populating them mainly for NodeManagers to be able to
  // download anyfiles in the distributed file-system. The tokens are
  // otherwise also useful in cases, for e.g., when one is running a
  // "hadoop dfs" command inside the distributed shell.
  ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
      localResources, env, commands, null, allTokens.duplicate(), null);
  //TODO: Add container to listener so it can be removed
  nmClientAsync.startContainerAsync(container, ctx);
}
示例13: allocateOutstandingContainer
/**
 * Blocks until {@code taskManagerCount} TaskManager containers have been
 * allocated, launching each container as the ResourceManager hands it out.
 * Containers that complete while we are still allocating are counted and
 * their diagnostics appended to {@code containerDiag}.
 *
 * @param containerDiag buffer that accumulates diagnostics of dead containers
 * @throws Exception on RM/NM communication errors or interruption
 */
private void allocateOutstandingContainer(StringBuffer containerDiag) throws Exception {
// respect custom JVM options in the YAML file
final String javaOpts = GlobalConfiguration.getString(ConfigConstants.FLINK_JVM_OPTIONS, "");
int allocatedContainers = 0;
while (allocatedContainers < taskManagerCount) {
// Heartbeat the RM (progress 0) and pick up newly allocated containers.
AllocateResponse response = rmClient.allocate(0);
for (Container container : response.getAllocatedContainers()) {
LOG.info("Got new Container for TM "+container.getId()+" on host "+container.getNodeId().getHost());
++allocatedContainers;
// Launch container by create ContainerLaunchContext
ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
String tmCommand = "$JAVA_HOME/bin/java -Xmx"+heapLimit+"m " + javaOpts ;
// Logging options are only added when a logging config was shipped.
if(hasLogback || hasLog4j) {
tmCommand += " -Dlog.file=\""+ApplicationConstants.LOG_DIR_EXPANSION_VAR +"/taskmanager.log\"";
}
if(hasLogback) {
tmCommand += " -Dlogback.configurationFile=file:logback.xml";
}
if(hasLog4j) {
tmCommand += " -Dlog4j.configuration=file:log4j.properties";
}
// TaskManager stdout/stderr go into the container log directory;
// <LOG_DIR> is expanded by the NodeManager at container launch time.
tmCommand += " "+YarnTaskManagerRunner.class.getName()+" -configDir . "
+ " 1>"
+ ApplicationConstants.LOG_DIR_EXPANSION_VAR
+ "/taskmanager-stdout.log"
+ " 2>"
+ ApplicationConstants.LOG_DIR_EXPANSION_VAR
+ "/taskmanager-stderr.log";
ctx.setCommands(Collections.singletonList(tmCommand));
LOG.info("Starting TM with command="+tmCommand);
ctx.setLocalResources(taskManagerLocalResources);
// Setup CLASSPATH for Container (=TaskTracker)
Map<String, String> containerEnv = new HashMap<String, String>();
Utils.setupEnv(conf, containerEnv); //add flink.jar to class path.
containerEnv.put(Client.ENV_CLIENT_USERNAME, yarnClientUsername);
ctx.setEnvironment(containerEnv);
// Forward the current user's credentials so the TM can access secured
// services; a failure here is logged but does not abort the launch.
UserGroupInformation user = UserGroupInformation.getCurrentUser();
try {
Credentials credentials = user.getCredentials();
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(),
0, dob.getLength());
ctx.setTokens(securityTokens);
} catch (IOException e) {
LOG.warn("Getting current user info failed when trying to launch the container", e);
}
LOG.info("Launching container " + allocatedContainers);
nmClient.startContainer(container, ctx);
}
// Account for containers that died while we were still allocating.
for (ContainerStatus status : response.getCompletedContainersStatuses()) {
++completedContainers;
LOG.info("Completed container (while allocating) "+status.getContainerId()+". Total Completed:" + completedContainers);
LOG.info("Diagnostics "+status.getDiagnostics());
// status.
logDeadContainer(status, containerDiag);
}
// Throttle the RM heartbeat loop.
Thread.sleep(100);
}
}
示例14: getTaskLogFile
/**
 * Returns {@code <LOG_DIR>/<filter>} — the task log path relative to the
 * container log directory placeholder expanded by the NodeManager.
 */
private static String getTaskLogFile(LogName filter) {
  final StringBuilder path = new StringBuilder(ApplicationConstants.LOG_DIR_EXPANSION_VAR);
  path.append(Path.SEPARATOR).append(filter.toString());
  return path.toString();
}