This article collects typical usage examples of the Java method org.apache.hadoop.yarn.conf.YarnConfiguration.get. If you are wondering what YarnConfiguration.get does, how to call it, or what real-world usage looks like, the hand-picked examples below may help. You can also read more about the containing class, org.apache.hadoop.yarn.conf.YarnConfiguration.
The following shows 5 code examples of YarnConfiguration.get, sorted by popularity by default.
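Before diving into project code, here is a minimal, self-contained sketch of the two common get overloads. The demo class name is invented for illustration; the keys are real YarnConfiguration constants, and the fallback address is only an example value:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfigurationGetDemo {
  public static void main(String[] args) {
    // Loads yarn-default.xml and yarn-site.xml from the classpath, if present.
    YarnConfiguration conf = new YarnConfiguration();

    // get(key) returns null when the key is not set anywhere.
    String rmHostname = conf.get(YarnConfiguration.RM_HOSTNAME);

    // get(key, defaultValue) returns the fallback instead of null.
    String rmAddress = conf.get(YarnConfiguration.RM_ADDRESS, "0.0.0.0:8032");

    System.out.println("rm hostname: " + rmHostname);
    System.out.println("rm address : " + rmAddress);
  }
}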
Example 1: findRedirectUrl
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
protected String findRedirectUrl() throws ServletException {
  String addr;
  if (proxyUriBases.size() == 1) { // external proxy or not RM HA
    addr = proxyUriBases.values().iterator().next();
  } else { // RM HA
    YarnConfiguration conf = new YarnConfiguration();
    String activeRMId = RMHAUtils.findActiveRMHAId(conf);
    String addressPropertyPrefix = YarnConfiguration.useHttps(conf)
        ? YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS
        : YarnConfiguration.RM_WEBAPP_ADDRESS;
    String host = conf.get(
        HAUtil.addSuffix(addressPropertyPrefix, activeRMId));
    addr = proxyUriBases.get(host);
  }
  if (addr == null) {
    throw new ServletException(
        "Could not determine the proxy server for redirection");
  }
  return addr;
}
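In an RM HA deployment, this snippet looks up the active RM ID, builds the HA-suffixed webapp address key (HTTPS or HTTP depending on the configured policy), and uses conf.get on that key to select the matching proxy URI base; a failed lookup surfaces as a ServletException.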
Example 2: startTwillRunner
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
public TwillRunnerService startTwillRunner(YarnConfiguration yarnConfiguration) {
  String zkStr = dremioConfig.getString(DremioConfig.ZOOKEEPER_QUORUM);
  String clusterId = yarnConfiguration.get(YARN_CLUSTER_ID);
  Preconditions.checkNotNull(clusterId, "Cluster ID can not be null");
  TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfiguration, zkStr);
  TwillRunnerService previousOne = twillRunners.putIfAbsent(new ClusterId(clusterId), twillRunner);
  if (previousOne == null) {
    // start the one we are planning to add; if it is already in the collection it should be started
    twillRunner.start();
    return twillRunner;
  }
  return previousOne;
}
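Note the putIfAbsent idiom here: twillRunner.start() is only called on the instance that actually made it into the map, so a racing caller gets back the already-registered (and already-started) runner while its own freshly built instance is simply discarded.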
Example 3: run
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
public void run(String[] args) throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();

  final String zonepaths = args[0];
  final String exclasspath = args[1];
  final String globalKMS = args.length == 2 ? "localhost:16000" : args[2];
  // Resolve the plugin RPC address, falling back to the host of fs.defaultFS
  // combined with the plugin's default port.
  final String pluginURI = conf.get(NuCypherExtRpcServer.NUCYPHER_EXT_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
      InetSocketAddress.createUnresolved(new URI(conf.get("fs.defaultFS")).getHost(),
          NuCypherExtRpcServer.DEFAULT_PORT).toString());
  String providers = conf.get(KEY_PROVIDER_PATH);

  YarnClientApplication app = yarnClient.createApplication();

  // Build the command line that launches the application master in its container.
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
  amContainer.setCommands(
      Collections.singletonList(
          //"/usr/bin/java"+
          Environment.JAVA_HOME.$$() + "/bin/java" +
          " -Xmx1024M" +
          " ApplicationMasterKMS" +
          " " + zonepaths +
          " " + exclasspath +
          " " + globalKMS +
          " " + providers +
          " " + pluginURI +
          " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" +
          " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

  ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
  ApplicationId appId = appContext.getApplicationId();
  LocalResource appMasterJar = Records.newRecord(LocalResource.class);
  // setupAppMasterJar(jarPath, appMasterJar);
  Map<String, LocalResource> localResources = new HashMap<>();
  //localResources.put("prkeyrotation.jar", appMasterJar);
  setupExtJar(exclasspath, localResources, appId.toString());
  amContainer.setLocalResources(localResources);

  Map<String, String> appMasterEnv = new HashMap<String, String>();
  setupAppMasterEnv(appMasterEnv, exclasspath);
  amContainer.setEnvironment(appMasterEnv);

  // Request 1 GB of memory and a single virtual core for the AM container.
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(1024);
  capability.setVirtualCores(1);

  appContext.setApplicationName("prkeyrotation");
  appContext.setAMContainerSpec(amContainer);
  appContext.setResource(capability);
  appContext.setQueue("default");

  System.out.println("Submitting application " + appId);
  yarnClient.submitApplication(appContext);

  // Poll until the application reaches a terminal state.
  ApplicationReport appReport = yarnClient.getApplicationReport(appId);
  YarnApplicationState appState = appReport.getYarnApplicationState();
  while (appState != YarnApplicationState.FINISHED &&
         appState != YarnApplicationState.KILLED &&
         appState != YarnApplicationState.FAILED) {
    Thread.sleep(100);
    appReport = yarnClient.getApplicationReport(appId);
    appState = appReport.getYarnApplicationState();
  }
  System.out.println("Application " + appId + " finished with" +
      " state " + appState +
      " at " + appReport.getFinishTime());
}
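After submission, the client polls getApplicationReport every 100 ms until the application reaches a terminal state (FINISHED, KILLED, or FAILED); a longer polling interval would likely be friendlier to the ResourceManager in production, but the tight loop keeps the example simple.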
Example 4: updateYarnConfiguration
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
@VisibleForTesting
protected String updateYarnConfiguration(Cluster cluster, YarnConfiguration yarnConfiguration) {
  String rmAddress = null;
  // make sure we set defaults first, before we overwrite them with props from ClusterConfig
  setYarnDefaults(cluster, yarnConfiguration);
  List<Property> keyValues = cluster.getClusterConfig().getSubPropertyList();
  if (keyValues != null && !keyValues.isEmpty()) {
    for (Property property : keyValues) {
      yarnConfiguration.set(property.getKey(), property.getValue());
      if (RM_HOSTNAME.equalsIgnoreCase(property.getKey())) {
        rmAddress = property.getValue();
      }
    }
  }
  String queue = cluster.getClusterConfig().getClusterSpec().getQueue();
  if (queue != null && !queue.isEmpty()) {
    yarnConfiguration.set(DacDaemonYarnApplication.YARN_QUEUE_NAME, queue);
  }
  Integer memoryOnHeap = cluster.getClusterConfig().getClusterSpec().getMemoryMBOnHeap();
  if (memoryOnHeap != null) {
    yarnConfiguration.setInt(DacDaemonYarnApplication.YARN_MEMORY_ON_HEAP, memoryOnHeap.intValue());
  }
  Integer memoryOffHeap = cluster.getClusterConfig().getClusterSpec().getMemoryMBOffHeap();
  if (memoryOffHeap != null) {
    yarnConfiguration.setInt(DacDaemonYarnApplication.YARN_MEMORY_OFF_HEAP, memoryOffHeap.intValue());
    yarnConfiguration.setInt(JAVA_RESERVED_MEMORY_MB, memoryOffHeap.intValue());
    if (yarnConfiguration.get(HEAP_RESERVED_MIN_RATIO) == null) {
      // Ratio between on-heap and total memory. Since we need more direct memory,
      // keep this ratio small so the heap stays as specified until it drops
      // below 10% of total memory. Can be overridden by the YARN configuration.
      yarnConfiguration.setDouble(HEAP_RESERVED_MIN_RATIO, 0.1D);
    }
  }
  Integer cpu = cluster.getClusterConfig().getClusterSpec().getVirtualCoreCount();
  if (cpu != null) {
    yarnConfiguration.setInt(DacDaemonYarnApplication.YARN_CPU, cpu.intValue());
  }
  Integer containerCount = cluster.getClusterConfig().getClusterSpec().getContainerCount();
  if (containerCount != null) {
    yarnConfiguration.setInt(DacDaemonYarnApplication.YARN_CONTAINER_COUNT, containerCount.intValue());
  }
  String clusterName = cluster.getClusterConfig().getName();
  if (clusterName != null) {
    yarnConfiguration.set(DacDaemonYarnApplication.YARN_APP_NAME, clusterName);
  }
  yarnConfiguration.set(YARN_CLUSTER_ID, cluster.getId().getId());
  return rmAddress;
}
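The null check on yarnConfiguration.get(HEAP_RESERVED_MIN_RATIO) is what turns the 0.1 ratio into a default rather than an override: any value already present in the YARN configuration wins, and the hard-coded ratio is only applied when nothing was configured.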
Example 5: createPreparer
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
protected TwillPreparer createPreparer(YarnConfiguration yarnConfiguration, List<Property> propertyList) {
  BundledJarRunner.Arguments discoveryArgs = new BundledJarRunner.Arguments.Builder()
      .setJarFileName(YARN_BUNDLED_JAR_NAME)
      .setLibFolder("/lib")
      .setMainClassName("com.dremio.dac.daemon.DremioDaemon")
      .setMainArgs(new String[] {})
      .createArguments();

  DacDaemonYarnApplication dacDaemonApp = new DacDaemonYarnApplication(dremioConfig, yarnConfiguration,
      new DacDaemonYarnApplication.Environment());
  File jarFile = new File(dacDaemonApp.getYarnBundledJarName());
  Preconditions.checkState(jarFile.exists());
  Preconditions.checkState(jarFile.canRead());

  TwillRunnerService twillRunner = startTwillRunner(yarnConfiguration);

  Map<String, String> envVars = Maps.newHashMap();
  envVars.put("MALLOC_ARENA_MAX", "4");
  envVars.put("MALLOC_MMAP_THRESHOLD_", "131072");
  envVars.put("MALLOC_TRIM_THRESHOLD_", "131072");
  envVars.put("MALLOC_TOP_PAD_", "131072");
  envVars.put("MALLOC_MMAP_MAX_", "65536");
  // maprfs-specific env vars to enable read-ahead throttling
  envVars.put("MAPR_IMPALA_RA_THROTTLE", "true");
  envVars.put("MAPR_MAX_RA_STREAMS", "800");

  try {
    String userName = UserGroupInformation.getCurrentUser().getUserName();
    envVars.put("HADOOP_USER_NAME", userName);
  } catch (IOException e) {
    logger.error("Exception while trying to fill out HADOOP_USER_NAME with current user", e);
  }

  String[] yarnClasspath = yarnConfiguration.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);
  final TwillPreparer preparer = twillRunner.prepare(dacDaemonApp)
      .addLogHandler(new YarnTwillLogHandler())
      .withApplicationClassPaths(yarnClasspath)
      .withBundlerClassAcceptor(new HadoopClassExcluder())
      .setLogLevels(ImmutableMap.of(Logger.ROOT_LOGGER_NAME, yarnContainerLogLevel()))
      .withEnv(envVars)
      .withMaxRetries(YARN_RUNNABLE_NAME, MAX_APP_RESTART_RETRIES)
      .withArguments(YARN_RUNNABLE_NAME, discoveryArgs.toArray());

  for (String classpathJar : dacDaemonApp.getJarNames()) {
    preparer.withClassPaths(classpathJar);
  }
  preparer.addJVMOptions(prepareCommandOptions(yarnConfiguration, propertyList));

  String queue = yarnConfiguration.get(DacDaemonYarnApplication.YARN_QUEUE_NAME);
  if (queue != null) {
    preparer.setSchedulerQueue(queue);
  }
  if (dremioConfig.getBoolean(DremioConfig.DEBUG_YARN_ENABLED)) {
    preparer.enableDebugging(true, YARN_RUNNABLE_NAME);
  }
  return preparer;
}
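The queue lookup at the end relies on the single-argument get returning null for unset keys: setSchedulerQueue is only invoked when DacDaemonYarnApplication.YARN_QUEUE_NAME was populated earlier (for example by updateYarnConfiguration in Example 4).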