This page collects typical usage examples of the Java method org.apache.hadoop.yarn.conf.YarnConfiguration.get. If you are wondering what YarnConfiguration.get does, how to call it, or what real-world uses look like, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.yarn.conf.YarnConfiguration.
Below are 5 code examples of the YarnConfiguration.get method, sorted by popularity by default.
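As a quick orientation before the examples, here is a minimal, self-contained sketch of the two basic get patterns. It assumes only a standard Hadoop client classpath; the key constants are real YarnConfiguration fields, and the printed values depend on your yarn-site.xml.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfGetDemo {
  public static void main(String[] args) {
    // YarnConfiguration layers yarn-default.xml and yarn-site.xml on top of
    // the core Hadoop configuration files.
    YarnConfiguration conf = new YarnConfiguration();

    // Single-argument get(...) returns null if the key is unset and has no default.
    String rmAddress = conf.get(YarnConfiguration.RM_ADDRESS);

    // The two-argument overload supplies an explicit fallback value.
    String scheduler = conf.get(YarnConfiguration.RM_SCHEDULER_ADDRESS, "0.0.0.0:8030");

    System.out.println("RM address: " + rmAddress);
    System.out.println("Scheduler address: " + scheduler);
  }
}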
Example 1: findRedirectUrl
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
protected String findRedirectUrl() throws ServletException {
  String addr;
  if (proxyUriBases.size() == 1) { // external proxy or not RM HA
    addr = proxyUriBases.values().iterator().next();
  } else { // RM HA
    YarnConfiguration conf = new YarnConfiguration();
    String activeRMId = RMHAUtils.findActiveRMHAId(conf);
    String addressPropertyPrefix = YarnConfiguration.useHttps(conf)
        ? YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS
        : YarnConfiguration.RM_WEBAPP_ADDRESS;
    String host = conf.get(
        HAUtil.addSuffix(addressPropertyPrefix, activeRMId));
    addr = proxyUriBases.get(host);
  }
  if (addr == null) {
    throw new ServletException(
        "Could not determine the proxy server for redirection");
  }
  return addr;
}
Example 2: startTwillRunner
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
public TwillRunnerService startTwillRunner(YarnConfiguration yarnConfiguration) {
  String zkStr = dremioConfig.getString(DremioConfig.ZOOKEEPER_QUORUM);
  String clusterId = yarnConfiguration.get(YARN_CLUSTER_ID);
  Preconditions.checkNotNull(clusterId, "Cluster ID can not be null");
  TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfiguration, zkStr);
  TwillRunnerService previousOne = twillRunners.putIfAbsent(new ClusterId(clusterId), twillRunner);
  if (previousOne == null) {
    // start the runner we just added; a pre-existing one should already be started
    twillRunner.start();
    return twillRunner;
  }
  return previousOne;
}
Example 3: run
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
public void run(String[] args) throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();

  String cryptopath = "";
  final String zonepaths = args[0];
  final String exclasspath = args[1];
  final String globalKMS = args.length == 2 ? "localhost:16000" : args[2];
  final String pluginURI = conf.get(NuCypherExtRpcServer.NUCYPHER_EXT_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
      InetSocketAddress.createUnresolved(new URI(conf.get("fs.defaultFS")).getHost(),
          NuCypherExtRpcServer.DEFAULT_PORT).toString());
  String providers = conf.get(KEY_PROVIDER_PATH);

  YarnClientApplication app = yarnClient.createApplication();

  // Build the ApplicationMaster launch command.
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
  amContainer.setCommands(
      Collections.singletonList(
          //"/usr/bin/java"+
          Environment.JAVA_HOME.$$() + "/bin/java" +
          " -Xmx1024M" +
          " ApplicationMasterKMS" +
          " " + zonepaths +
          " " + exclasspath +
          " " + globalKMS +
          " " + providers +
          " " + pluginURI +
          " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" +
          " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

  ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
  ApplicationId appId = appContext.getApplicationId();

  LocalResource appMasterJar = Records.newRecord(LocalResource.class);
  // setupAppMasterJar(jarPath, appMasterJar);
  Map<String, LocalResource> localResources = new HashMap<>();
  //localResources.put("prkeyrotation.jar", appMasterJar);
  setupExtJar(exclasspath, localResources, appId.toString());
  amContainer.setLocalResources(localResources);

  Map<String, String> appMasterEnv = new HashMap<String, String>();
  setupAppMasterEnv(appMasterEnv, exclasspath);
  amContainer.setEnvironment(appMasterEnv);

  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(1024);
  capability.setVirtualCores(1);

  appContext.setApplicationName("prkeyrotation");
  appContext.setAMContainerSpec(amContainer);
  appContext.setResource(capability);
  appContext.setQueue("default");

  System.out.println("Submitting application " + appId);
  yarnClient.submitApplication(appContext);

  // Poll until the application reaches a terminal state.
  ApplicationReport appReport = yarnClient.getApplicationReport(appId);
  YarnApplicationState appState = appReport.getYarnApplicationState();
  while (appState != YarnApplicationState.FINISHED &&
         appState != YarnApplicationState.KILLED &&
         appState != YarnApplicationState.FAILED) {
    Thread.sleep(100);
    appReport = yarnClient.getApplicationReport(appId);
    appState = appReport.getYarnApplicationState();
  }
  System.out.println("Application " + appId + " finished with state " + appState +
      " at " + appReport.getFinishTime());
}
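The notable get call in example 3 is the one for pluginURI: the fallback passed to conf.get(key, default) is itself computed from fs.defaultFS. A minimal sketch of that pattern, assuming fs.defaultFS is an hdfs:// URI with a host component; the key name and port are hypothetical stand-ins for the plugin's own constants.

import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ComputedDefaultSketch {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    // Assumes fs.defaultFS looks like hdfs://namenode:8020; with the stock
    // file:/// default, getHost() would be null and this would throw.
    String host = new URI(conf.get("fs.defaultFS")).getHost();
    String fallback = InetSocketAddress.createUnresolved(host, 8020).toString();
    // "example.service.rpc-address" is a hypothetical key.
    String serviceAddr = conf.get("example.service.rpc-address", fallback);
    System.out.println("service address: " + serviceAddr);
  }
}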
Example 4: updateYarnConfiguration
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
@VisibleForTesting
protected String updateYarnConfiguration(Cluster cluster, YarnConfiguration yarnConfiguration) {
  String rmAddress = null;
  // make sure we set defaults first, before we overwrite them with props from ClusterConfig
  setYarnDefaults(cluster, yarnConfiguration);

  List<Property> keyValues = cluster.getClusterConfig().getSubPropertyList();
  if (keyValues != null && !keyValues.isEmpty()) {
    for (Property property : keyValues) {
      yarnConfiguration.set(property.getKey(), property.getValue());
      if (RM_HOSTNAME.equalsIgnoreCase(property.getKey())) {
        rmAddress = property.getValue();
      }
    }
  }

  String queue = cluster.getClusterConfig().getClusterSpec().getQueue();
  if (queue != null && !queue.isEmpty()) {
    yarnConfiguration.set(DacDaemonYarnApplication.YARN_QUEUE_NAME, queue);
  }

  Integer memoryOnHeap = cluster.getClusterConfig().getClusterSpec().getMemoryMBOnHeap();
  if (memoryOnHeap != null) {
    yarnConfiguration.setInt(DacDaemonYarnApplication.YARN_MEMORY_ON_HEAP, memoryOnHeap.intValue());
  }

  Integer memoryOffHeap = cluster.getClusterConfig().getClusterSpec().getMemoryMBOffHeap();
  if (memoryOffHeap != null) {
    yarnConfiguration.setInt(DacDaemonYarnApplication.YARN_MEMORY_OFF_HEAP, memoryOffHeap.intValue());
    yarnConfiguration.setInt(JAVA_RESERVED_MEMORY_MB, memoryOffHeap.intValue());
    if (yarnConfiguration.get(HEAP_RESERVED_MIN_RATIO) == null) {
      // Ratio between on-heap and total memory. Since we need more direct memory,
      // keep this ratio small so the heap stays as specified until it drops
      // below 10% of total memory. Can be overridden by YARN configuration.
      yarnConfiguration.setDouble(HEAP_RESERVED_MIN_RATIO, 0.1D);
    }
  }

  Integer cpu = cluster.getClusterConfig().getClusterSpec().getVirtualCoreCount();
  if (cpu != null) {
    yarnConfiguration.setInt(DacDaemonYarnApplication.YARN_CPU, cpu.intValue());
  }

  Integer containerCount = cluster.getClusterConfig().getClusterSpec().getContainerCount();
  if (containerCount != null) {
    yarnConfiguration.setInt(DacDaemonYarnApplication.YARN_CONTAINER_COUNT, containerCount.intValue());
  }

  String clusterName = cluster.getClusterConfig().getName();
  if (clusterName != null) {
    yarnConfiguration.set(DacDaemonYarnApplication.YARN_APP_NAME, clusterName);
  }

  yarnConfiguration.set(YARN_CLUSTER_ID, cluster.getId().getId());
  return rmAddress;
}
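The HEAP_RESERVED_MIN_RATIO branch above shows a common idiom: probe with get(...) == null so a programmatic default never clobbers a value the user set in yarn-site.xml or elsewhere. A minimal sketch of the idiom, with a hypothetical key standing in for the example's constant.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class OverridableDefaultSketch {
  // Hypothetical tuning key standing in for HEAP_RESERVED_MIN_RATIO.
  private static final String RATIO_KEY = "example.heap.reserved.min-ratio";

  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Only install the default when the user has not set the key anywhere.
    if (conf.get(RATIO_KEY) == null) {
      conf.setDouble(RATIO_KEY, 0.1D);
    }
    System.out.println(RATIO_KEY + " = " + conf.getDouble(RATIO_KEY, Double.NaN));
  }
}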
Example 5: createPreparer
import org.apache.hadoop.yarn.conf.YarnConfiguration; // import the package/class this method depends on
protected TwillPreparer createPreparer(YarnConfiguration yarnConfiguration, List<Property> propertyList) {
  BundledJarRunner.Arguments discoveryArgs = new BundledJarRunner.Arguments.Builder()
      .setJarFileName(YARN_BUNDLED_JAR_NAME)
      .setLibFolder("/lib")
      .setMainClassName("com.dremio.dac.daemon.DremioDaemon")
      .setMainArgs(new String[] {})
      .createArguments();

  DacDaemonYarnApplication dacDaemonApp = new DacDaemonYarnApplication(dremioConfig, yarnConfiguration,
      new DacDaemonYarnApplication.Environment());

  File jarFile = new File(dacDaemonApp.getYarnBundledJarName());
  Preconditions.checkState(jarFile.exists());
  Preconditions.checkState(jarFile.canRead());

  TwillRunnerService twillRunner = startTwillRunner(yarnConfiguration);

  Map<String, String> envVars = Maps.newHashMap();
  envVars.put("MALLOC_ARENA_MAX", "4");
  envVars.put("MALLOC_MMAP_THRESHOLD_", "131072");
  envVars.put("MALLOC_TRIM_THRESHOLD_", "131072");
  envVars.put("MALLOC_TOP_PAD_", "131072");
  envVars.put("MALLOC_MMAP_MAX_", "65536");
  // maprfs-specific env vars to enable read-ahead throttling
  envVars.put("MAPR_IMPALA_RA_THROTTLE", "true");
  envVars.put("MAPR_MAX_RA_STREAMS", "800");

  try {
    String userName = UserGroupInformation.getCurrentUser().getUserName();
    envVars.put("HADOOP_USER_NAME", userName);
  } catch (IOException e) {
    logger.error("Exception while trying to fill out HADOOP_USER_NAME with current user", e);
  }

  String[] yarnClasspath = yarnConfiguration.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);

  final TwillPreparer preparer = twillRunner.prepare(dacDaemonApp)
      .addLogHandler(new YarnTwillLogHandler())
      .withApplicationClassPaths(yarnClasspath)
      .withBundlerClassAcceptor(new HadoopClassExcluder())
      .setLogLevels(ImmutableMap.of(Logger.ROOT_LOGGER_NAME, yarnContainerLogLevel()))
      .withEnv(envVars)
      .withMaxRetries(YARN_RUNNABLE_NAME, MAX_APP_RESTART_RETRIES)
      .withArguments(YARN_RUNNABLE_NAME, discoveryArgs.toArray());

  for (String classpathJar : dacDaemonApp.getJarNames()) {
    preparer.withClassPaths(classpathJar);
  }

  preparer.addJVMOptions(prepareCommandOptions(yarnConfiguration, propertyList));

  String queue = yarnConfiguration.get(DacDaemonYarnApplication.YARN_QUEUE_NAME);
  if (queue != null) {
    preparer.setSchedulerQueue(queue);
  }

  if (dremioConfig.getBoolean(DremioConfig.DEBUG_YARN_ENABLED)) {
    preparer.enableDebugging(true, YARN_RUNNABLE_NAME);
  }

  return preparer;
}
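Besides the plain get for the queue name, example 5 reads the application classpath with getStrings(key, defaults...), which splits the comma-separated value and falls back to the compiled-in default list when the key is unset. A minimal sketch of that call in isolation:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class AppClasspathSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Falls back to DEFAULT_YARN_APPLICATION_CLASSPATH when the key is unset.
    String[] classpath = conf.getStrings(
        YarnConfiguration.YARN_APPLICATION_CLASSPATH,
        YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);
    for (String entry : classpath) {
      System.out.println(entry);
    }
  }
}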