This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getLong. If you have been wondering what Configuration.getLong does, how to call it, or where to find real-world uses of it, the hand-picked code samples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.conf.Configuration.
The following presents 15 code examples of the Configuration.getLong method, ordered by popularity.
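Before diving into the examples, here is a minimal sketch of the basic call pattern. The key name "my.example.timeout.ms" and both values are purely illustrative, not properties of any real project: getLong returns the named property parsed as a long, falling back to the supplied default when the key is unset.

import org.apache.hadoop.conf.Configuration;

public class GetLongDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key absent: the supplied default (30000L) is returned.
    long timeoutMs = conf.getLong("my.example.timeout.ms", 30000L);
    // Key present: the stored value takes precedence over the default.
    conf.setLong("my.example.timeout.ms", 5000L);
    long overridden = conf.getLong("my.example.timeout.ms", 30000L);
    System.out.println(timeoutMs + " " + overridden); // prints: 30000 5000
  }
}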
Example 1: setConf
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public synchronized void setConf(Configuration conf) {
  balancedSpaceThreshold = conf.getLong(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY,
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT);
  balancedPreferencePercent = conf.getFloat(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT);
  LOG.info("Available space volume choosing policy initialized: " +
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY +
      " = " + balancedSpaceThreshold + ", " +
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
      " = " + balancedPreferencePercent);
  if (balancedPreferencePercent > 1.0) {
    LOG.warn("The value of " + DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
        " is greater than 1.0 but should be in the range 0.0 - 1.0");
  }
  if (balancedPreferencePercent < 0.5) {
    LOG.warn("The value of " + DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
        " is less than 0.5 so volumes with less available disk space will receive more block allocations");
  }
}
Example 2: validateConfigs
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
protected static void validateConfigs(Configuration conf) {
  // validate max-attempts
  int globalMaxAppAttempts =
      conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  if (globalMaxAppAttempts <= 0) {
    throw new YarnRuntimeException("Invalid global max attempts configuration"
        + ", " + YarnConfiguration.RM_AM_MAX_ATTEMPTS
        + "=" + globalMaxAppAttempts + ", it should be a positive integer.");
  }
  // validate expireIntvl >= heartbeatIntvl
  long expireIntvl = conf.getLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
      YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
  long heartbeatIntvl =
      conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS,
          YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS);
  if (expireIntvl < heartbeatIntvl) {
    throw new YarnRuntimeException("Nodemanager expiry interval should be no"
        + " less than heartbeat interval, "
        + YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS + "=" + expireIntvl
        + ", " + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS + "="
        + heartbeatIntvl);
  }
}
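As a quick illustration of the second check (a hypothetical snippet, not taken from the source), configuring an expiry interval smaller than the heartbeat interval and then calling validateConfigs would trigger the YarnRuntimeException:

Configuration conf = new Configuration();
conf.setLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 10000L);
conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 5000L); // smaller than the heartbeat
// validateConfigs(conf); // would throw YarnRuntimeException: expiry < heartbeat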
Example 3: createJob
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public Job createJob(Configuration conf) throws IOException {
  long numBytesToWritePerMap = conf.getLong(BYTES_PER_MAP, 10 * 1024);
  long totalBytesToWrite = conf.getLong(TOTAL_BYTES, numBytesToWritePerMap);
  int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap);
  if (numMaps == 0 && totalBytesToWrite > 0) {
    numMaps = 1;
    conf.setLong(BYTES_PER_MAP, totalBytesToWrite);
  }
  conf.setInt(MRJobConfig.NUM_MAPS, numMaps);
  Job job = Job.getInstance(conf);
  job.setJarByClass(RandomTextWriterJob.class);
  job.setJobName("random-text-writer");
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setInputFormatClass(RandomInputFormat.class);
  job.setMapperClass(RandomTextMapper.class);
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  //FileOutputFormat.setOutputPath(job, new Path("random-output"));
  job.setNumReduceTasks(0);
  return job;
}
Example 4: configure
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Set up the configuration for a constraint as to whether it is enabled and
 * its priority.
 *
 * @param conf
 *          on which to base the new configuration
 * @param enabled
 *          <tt>true</tt> if it should be run
 * @param priority
 *          relative to other constraints
 * @return a new configuration, storable in the {@link HTableDescriptor}
 */
private static Configuration configure(Configuration conf, boolean enabled,
    long priority) {
  // create the configuration to actually be stored
  // clone if possible, but otherwise just create an empty configuration
  Configuration toWrite = conf == null ? new Configuration()
      : new Configuration(conf);
  // update internal properties
  toWrite.setBooleanIfUnset(ENABLED_KEY, enabled);
  // set the priority only if it has not been set yet
  if (toWrite.getLong(PRIORITY_KEY, UNSET_PRIORITY) == UNSET_PRIORITY) {
    toWrite.setLong(PRIORITY_KEY, priority);
  }
  return toWrite;
}
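One detail worth calling out: Configuration offers setBooleanIfUnset (used above for ENABLED_KEY) but, as far as we know, no long counterpart, so the code emulates "set long if unset" by probing getLong with a sentinel default (UNSET_PRIORITY). The same pattern as a standalone, hypothetical helper (assumes it lives in a class with Configuration imported):

// Hypothetical helper: writes `value` only when `key` is absent, using `sentinel`
// as a default that no legitimate stored value is expected to equal.
static void setLongIfUnset(Configuration conf, String key, long value, long sentinel) {
  if (conf.getLong(key, sentinel) == sentinel) {
    conf.setLong(key, value);
  }
}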
Example 5: getTimeoutOnRIT
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
protected long getTimeoutOnRIT() {
  // Guess timeout. Multiply the max number of regions on a server
  // by how long we think one region takes opening.
  Configuration conf = server.getConfiguration();
  long perRegionOpenTimeGuesstimate =
      conf.getLong("hbase.bulk.assignment.perregion.open.time", 1000);
  int maxRegionsPerServer = 1;
  for (List<HRegionInfo> regionList : bulkPlan.values()) {
    int size = regionList.size();
    if (size > maxRegionsPerServer) {
      maxRegionsPerServer = size;
    }
  }
  long timeout = perRegionOpenTimeGuesstimate * maxRegionsPerServer
      + conf.getLong("hbase.regionserver.rpc.startup.waittime", 60000)
      + conf.getLong("hbase.bulk.assignment.perregionserver.rpc.waittime",
          30000) * bulkPlan.size();
  LOG.debug("Timeout-on-RIT=" + timeout);
  return timeout;
}
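For intuition, with the defaults shown (1000 ms per region open, a 60000 ms startup wait, a 30000 ms per-region-server RPC wait) and an illustrative bulk plan spanning 10 region servers whose busiest server holds 500 regions, the guess works out to 1000 * 500 + 60000 + 30000 * 10 = 860000 ms, roughly 14 minutes.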
Example 6: createBatchPool
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
private void createBatchPool(Configuration conf) {
  // Use the same config for keep alive as in HConnectionImplementation.getBatchPool();
  int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256);
  int coreThreads = conf.getInt("hbase.multihconnection.threads.core", 256);
  if (maxThreads == 0) {
    maxThreads = Runtime.getRuntime().availableProcessors() * 8;
  }
  if (coreThreads == 0) {
    coreThreads = Runtime.getRuntime().availableProcessors() * 8;
  }
  long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60);
  LinkedBlockingQueue<Runnable> workQueue =
      new LinkedBlockingQueue<Runnable>(maxThreads
          * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
              HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
  ThreadPoolExecutor tpe =
      new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue,
          Threads.newDaemonThreadFactory("MultiHConnection" + "-shared-"));
  tpe.allowCoreThreadTimeOut(true);
  this.batchPool = tpe;
}
Example 7: TimeBoundedMultiThreadedReader
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public TimeBoundedMultiThreadedReader(LoadTestDataGenerator dataGen, Configuration conf,
    TableName tableName, double verifyPercent) throws IOException {
  super(dataGen, conf, tableName, verifyPercent);
  long timeoutMs = conf.getLong(
      String.format("%s.%s", TEST_NAME, GET_TIMEOUT_KEY), DEFAULT_GET_TIMEOUT);
  timeoutNano = timeoutMs * 1000000;
  LOG.info("Timeout for gets: " + timeoutMs);
  String runTimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
  this.runTime = conf.getLong(runTimeKey, -1);
  if (this.runTime <= 0) {
    throw new IllegalArgumentException("Please configure " + runTimeKey);
  }
}
Example 8: serviceInit
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void serviceInit(Configuration conf) throws Exception {
  nmExpireInterval =
      conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
          YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
  configuredMaximumAllocationWaitTime =
      conf.getLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
          YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS);
  createReleaseCache();
  super.serviceInit(conf);
}
Example 9: createDelegationTokenSecretManager
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Create delegation token secret manager
 */
private DelegationTokenSecretManager createDelegationTokenSecretManager(
    Configuration conf) {
  return new DelegationTokenSecretManager(conf.getLong(
      DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY,
      DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT),
      conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY,
          DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT),
      conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
          DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT),
      DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL,
      conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY,
          DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT),
      this);
}
Example 10: serviceInit
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
protected void serviceInit(Configuration conf) throws Exception {
  LOG.info("JobHistory Init");
  this.conf = conf;
  this.appID = ApplicationId.newInstance(0, 0);
  this.appAttemptID = RecordFactoryProvider.getRecordFactory(conf)
      .newRecordInstance(ApplicationAttemptId.class);
  moveThreadInterval = conf.getLong(
      JHAdminConfig.MR_HISTORY_MOVE_INTERVAL_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_INTERVAL_MS);
  hsManager = createHistoryFileManager();
  hsManager.init(conf);
  try {
    hsManager.initExisting();
  } catch (IOException e) {
    throw new YarnRuntimeException("Failed to initialize existing directories", e);
  }
  storage = createHistoryStorage();
  if (storage instanceof Service) {
    ((Service) storage).init(conf);
  }
  storage.setHistoryFileManager(hsManager);
  super.serviceInit(conf);
}
Example 11: Groups
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public Groups(Configuration conf, final Timer timer) {
  impl =
      ReflectionUtils.newInstance(
          conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
              ShellBasedUnixGroupsMapping.class,
              GroupMappingServiceProvider.class),
          conf);
  cacheTimeout =
      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS,
          CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;
  negativeCacheTimeout =
      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,
          CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT) * 1000;
  warningDeltaMs =
      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS,
          CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT);
  parseStaticMapping(conf);
  this.timer = timer;
  this.cache = CacheBuilder.newBuilder()
      .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
      .ticker(new TimerToTickerAdapter(timer))
      .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
      .build(new GroupCacheLoader());
  if (negativeCacheTimeout > 0) {
    Cache<String, Boolean> tempMap = CacheBuilder.newBuilder()
        .expireAfterWrite(negativeCacheTimeout, TimeUnit.MILLISECONDS)
        .ticker(new TimerToTickerAdapter(timer))
        .build();
    negativeCache = Collections.newSetFromMap(tempMap.asMap());
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Group mapping impl=" + impl.getClass().getName() +
        "; cacheTimeout=" + cacheTimeout + "; warningDeltaMs=" +
        warningDeltaMs);
  }
}
Example 12: ReplicationSourceManager
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Creates a replication manager and sets the watch on all the other registered region servers.
 * @param replicationQueues the interface for manipulating replication queues
 * @param replicationPeers the interface for tracking replication peers
 * @param replicationTracker the tracker for replication state changes
 * @param conf the configuration to use
 * @param server the server for this region server
 * @param fs the file system to use
 * @param logDir the directory that contains all wal directories of live RSs
 * @param oldLogDir the directory where old logs are archived
 * @param clusterId the UUID of the local cluster
 */
public ReplicationSourceManager(final ReplicationQueues replicationQueues,
    final ReplicationPeers replicationPeers, final ReplicationTracker replicationTracker,
    final Configuration conf, final Server server, final FileSystem fs, final Path logDir,
    final Path oldLogDir, final UUID clusterId) {
  // CopyOnWriteArrayList is thread-safe; reads are expected to far outnumber writes.
  this.sources = new CopyOnWriteArrayList<ReplicationSourceInterface>();
  this.replicationQueues = replicationQueues;
  this.replicationPeers = replicationPeers;
  this.replicationTracker = replicationTracker;
  this.server = server;
  this.walsById = new HashMap<String, Map<String, SortedSet<String>>>();
  this.walsByIdRecoveredQueues = new ConcurrentHashMap<String, Map<String, SortedSet<String>>>();
  this.oldsources = new CopyOnWriteArrayList<ReplicationSourceInterface>();
  this.conf = conf;
  this.fs = fs;
  this.logDir = logDir;
  this.oldLogDir = oldLogDir;
  this.sleepBeforeFailover =
      conf.getLong("replication.sleep.before.failover", 30000); // 30 seconds
  this.clusterId = clusterId;
  this.replicationTracker.registerListener(this);
  this.replicationPeers.getAllPeerIds();
  // It's preferable to fail over one RS at a time, but with good zk servers
  // more could be processed at the same time.
  int nbWorkers = conf.getInt("replication.executor.workers", 1);
  // use a short 100ms sleep since this could be done inline with a RS startup
  // even if we fail, other region servers can take care of it
  this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers,
      100, TimeUnit.MILLISECONDS,
      new LinkedBlockingQueue<Runnable>());
  ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
  tfb.setNameFormat("ReplicationExecutor-%d");
  tfb.setDaemon(true);
  this.executor.setThreadFactory(tfb.build());
  this.rand = new Random();
  this.latestPaths = Collections.synchronizedSet(new HashSet<Path>());
}
Example 13: setup
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
protected void setup(Context context)
    throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  this.reduceSleepCount =
      conf.getInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
  this.reduceSleepDuration = reduceSleepCount == 0 ? 0 :
      conf.getLong(REDUCE_SLEEP_TIME, 100) / reduceSleepCount;
}
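Note that the configured REDUCE_SLEEP_TIME acts as a total budget divided evenly across the sleep iterations: with the default of 100 and, say, reduceSleepCount = 10 (an illustrative value), each of the 10 sleeps lasts 100 / 10 = 10 ms (integer division).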
Example 14: HConnectionImplementation
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * For tests.
 */
protected HConnectionImplementation(Configuration conf) {
  this.conf = conf;
  this.connectionConfig = new ConnectionConfiguration(conf);
  this.closed = false;
  this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  this.useMetaReplicas = conf.getBoolean(HConstants.USE_META_REPLICAS,
      HConstants.DEFAULT_USE_META_REPLICAS);
  this.numTries = connectionConfig.getRetriesNumber();
  this.rpcTimeout = conf.getInt(
      HConstants.HBASE_RPC_TIMEOUT_KEY,
      HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
  if (conf.getBoolean(CLIENT_NONCES_ENABLED_KEY, true)) {
    synchronized (nonceGeneratorCreateLock) {
      if (ConnectionManager.nonceGenerator == null) {
        ConnectionManager.nonceGenerator = new PerClientRandomNonceGenerator();
      }
      this.nonceGenerator = ConnectionManager.nonceGenerator;
    }
  } else {
    this.nonceGenerator = new NoNonceGenerator();
  }
  stats = ServerStatisticTracker.create(conf);
  this.asyncProcess = createAsyncProcess(this.conf);
  this.interceptor = (new RetryingCallerInterceptorFactory(conf)).build();
  this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf, interceptor, this.stats);
  this.backoffPolicy = ClientBackoffPolicyFactory.create(conf);
  if (conf.getBoolean(CLIENT_SIDE_METRICS_ENABLED_KEY, false)) {
    this.metrics = new MetricsConnection(this);
  } else {
    this.metrics = null;
  }
  this.hostnamesCanChange = conf.getBoolean(RESOLVE_HOSTNAME_ON_FAIL_KEY, true);
  this.metaCache = new MetaCache(this.metrics);
}
Example 15: checkMemoryUpperLimits
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@SuppressWarnings("deprecation")
private static boolean checkMemoryUpperLimits(String jobKey, String limitKey,
    Configuration conf,
    boolean convertLimitToMB) {
  if (conf.get(limitKey) != null) {
    long limit = conf.getLong(limitKey, JobConf.DISABLED_MEMORY_LIMIT);
    // scale only if the max memory limit is set.
    if (limit >= 0) {
      if (convertLimitToMB) {
        limit /= (1024 * 1024); // converting to MB
      }
      long scaledConfigValue =
          conf.getLong(jobKey, JobConf.DISABLED_MEMORY_LIMIT);
      // check now
      if (scaledConfigValue > limit) {
        throw new RuntimeException("Simulated job's configuration"
            + " parameter '" + jobKey + "' got scaled to a value '"
            + scaledConfigValue + "' which exceeds the upper limit of '"
            + limit + "' defined for the simulated cluster by the key '"
            + limitKey + "'. To disable High-Ram feature emulation, set '"
            + GRIDMIX_HIGHRAM_EMULATION_ENABLE + "' to 'false'.");
      }
      return true;
    }
  }
  return false;
}