This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getDouble. If you are wondering what Configuration.getDouble does, how to call it, or what real uses of it look like, the curated examples below may help. You can also read more about its enclosing class, org.apache.hadoop.conf.Configuration.
The section below presents 5 code examples of Configuration.getDouble, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
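Before diving into the examples, here is a minimal, self-contained sketch of the basic call: getDouble(name, defaultValue) parses the named property as a double and returns defaultValue when the property is unset. The property names here are made up for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetDoubleBasics {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setDouble("demo.sample.ratio", 0.75);                 // hypothetical key
    double ratio = conf.getDouble("demo.sample.ratio", 0.5);   // 0.75: key is set
    double fallback = conf.getDouble("demo.missing.key", 0.5); // 0.5: key is unset
    System.out.println(ratio + " / " + fallback);
  }
}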
Example 1: super
import org.apache.hadoop.conf.Configuration; // the package/class this method depends on

public DefaultSpeculator(Configuration conf, AppContext context,
    TaskRuntimeEstimator estimator, Clock clock) {
  super(DefaultSpeculator.class.getName());

  this.conf = conf;
  this.context = context;
  this.estimator = estimator;
  this.clock = clock;
  this.eventHandler = context.getEventHandler();

  // How soon the speculator may retry after a pass that did not / did
  // speculate a task.
  this.soonestRetryAfterNoSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE,
          MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_NO_SPECULATE);
  this.soonestRetryAfterSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE,
          MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_SPECULATE);

  // getDouble reads the caps on what fraction of running/total tasks may
  // be speculated at once.
  this.proportionRunningTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS,
          MRJobConfig.DEFAULT_SPECULATIVECAP_RUNNING_TASKS);
  this.proportionTotalTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS,
          MRJobConfig.DEFAULT_SPECULATIVECAP_TOTAL_TASKS);
  this.minimumAllowedSpeculativeTasks =
      conf.getInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS,
          MRJobConfig.DEFAULT_SPECULATIVE_MINIMUM_ALLOWED_TASKS);
}
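Example 1 reads the two speculation caps with getDouble. If you wanted to tighten those caps for a particular job, a sketch like the following would do it; the numeric values are arbitrary, and only the MRJobConfig constants already referenced above are assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class SpeculationCaps {
  public static Configuration withTighterCaps() {
    Configuration conf = new Configuration();
    // Allow speculation for at most 5% of running tasks and 2% of all
    // tasks (arbitrary illustrative values).
    conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.05);
    conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.02);
    return conf;
  }
}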
Example 2: parseDecayFactor
import org.apache.hadoop.conf.Configuration; // the package/class this method depends on

private static double parseDecayFactor(String ns, Configuration conf) {
  // The key is prefixed with the caller-supplied namespace.
  double factor = conf.getDouble(ns + "." +
      IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,
      IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT);

  // The factor must lie strictly between 0 and 1.
  if (factor <= 0 || factor >= 1) {
    throw new IllegalArgumentException("Decay Factor " +
        "must be between 0 and 1");
  }

  return factor;
}
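The key in Example 2 is built by prefixing a caller-supplied namespace, which lets several call queues keep independent settings in one Configuration. Here is a standalone analogue of the same lookup-and-validate pattern; the key name, default, and namespace are invented for illustration:

import org.apache.hadoop.conf.Configuration;

public class DecayFactorDemo {
  // Hypothetical key/default, standing in for the scheduler's private constants.
  private static final String FACTOR_KEY = "decay-scheduler.decay-factor";
  private static final double FACTOR_DEFAULT = 0.5;

  static double parseDecayFactor(String ns, Configuration conf) {
    double factor = conf.getDouble(ns + "." + FACTOR_KEY, FACTOR_DEFAULT);
    if (factor <= 0 || factor >= 1) {
      throw new IllegalArgumentException("Decay Factor must be between 0 and 1");
    }
    return factor;
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setDouble("ipc.8020." + FACTOR_KEY, 0.9);          // per-namespace setting
    System.out.println(parseDecayFactor("ipc.8020", conf)); // 0.9
  }
}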
Example 3: configureForRegion
import org.apache.hadoop.conf.Configuration; // the package/class this method depends on

@Override
protected void configureForRegion(HRegion region) {
  super.configureForRegion(region);
  Configuration conf = getConf();
  HTableDescriptor desc = region.getTableDesc();
  if (desc != null) {
    this.desiredMaxFileSize = desc.getMaxFileSize();
  }
  if (this.desiredMaxFileSize <= 0) {
    this.desiredMaxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
        HConstants.DEFAULT_MAX_FILE_SIZE);
  }
  // Randomize the split size by up to +/- (jitter / 2) so that regions
  // created at the same time do not all split at once.
  double jitter = conf.getDouble("hbase.hregion.max.filesize.jitter", 0.25D);
  this.desiredMaxFileSize += (long)(desiredMaxFileSize * (RANDOM.nextFloat() - 0.5D) * jitter);
}
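The jitter line deserves a closer look: RANDOM.nextFloat() - 0.5 falls in [-0.5, 0.5), so with the default jitter of 0.25 the adjustment stays within roughly +/-12.5% of the configured maximum. A tiny standalone computation, with an illustrative 10 GB base size:

import java.util.Random;

public class JitterDemo {
  public static void main(String[] args) {
    long desiredMaxFileSize = 10L * 1024 * 1024 * 1024; // 10 GB, illustrative
    double jitter = 0.25;                               // the default read above
    Random random = new Random();
    // (nextFloat() - 0.5) is in [-0.5, 0.5), so the shift is within +/-12.5%.
    long jittered = desiredMaxFileSize
        + (long) (desiredMaxFileSize * (random.nextFloat() - 0.5D) * jitter);
    System.out.println(jittered); // between ~8.75 GB and ~11.25 GB
  }
}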
Example 4: WorkerManager
import org.apache.hadoop.conf.Configuration; // the package/class this method depends on

public WorkerManager(AMContext context) {
  this.context = context;
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  readLock = readWriteLock.readLock();
  writeLock = readWriteLock.writeLock();

  Configuration conf = context.getConf();
  workersInGroup =
      conf.getInt(AngelConf.ANGEL_WORKERGROUP_WORKER_NUMBER,
          AngelConf.DEFAULT_ANGEL_WORKERGROUP_WORKER_NUMBER);
  taskNumberInEachWorker =
      conf.getInt(AngelConf.ANGEL_WORKER_TASK_NUMBER,
          AngelConf.DEFAULT_ANGEL_WORKER_TASK_NUMBER);

  // Nested getDouble: fall back to the task-level tolerance (and then to
  // its default) when the group-level tolerance is not set.
  tolerateFailedGroup =
      conf.getDouble(AngelConf.ANGEL_WORKERGROUP_FAILED_TOLERATE, conf.getDouble(
          AngelConf.ANGEL_TASK_ERROR_TOLERATE,
          AngelConf.DEFAULT_ANGEL_TASK_ERROR_TOLERATE));

  int workerMemory =
      conf.getInt(AngelConf.ANGEL_WORKER_MEMORY_GB,
          AngelConf.DEFAULT_ANGEL_WORKER_MEMORY_GB) * 1024; // GB -> MB
  int workerVcores =
      conf.getInt(AngelConf.ANGEL_WORKER_CPU_VCORES,
          AngelConf.DEFAULT_ANGEL_WORKER_CPU_VCORES);
  int workerPriority =
      conf.getInt(AngelConf.ANGEL_WORKER_PRIORITY,
          AngelConf.DEFAULT_ANGEL_WORKER_PRIORITY);
  workerResource = Resource.newInstance(workerMemory, workerVcores);
  PRIORITY_WORKER =
      RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
  PRIORITY_WORKER.setPriority(workerPriority);

  workerGroupMap = new HashMap<WorkerGroupId, AMWorkerGroup>();
  findWorkerGroupMap = new HashMap<WorkerId, AMWorkerGroup>();
  workersMap = new HashMap<WorkerId, AMWorker>();
  taskIdToWorkerMap = new HashMap<TaskId, AMWorker>();
  successGroups = new HashSet<WorkerGroupId>();
  killedGroups = new HashSet<WorkerGroupId>();
  failedGroups = new HashSet<WorkerGroupId>();
}
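The tolerateFailedGroup line shows a useful idiom: because getDouble simply returns its second argument when the key is absent, one getDouble call can serve as another's default, forming a fallback chain. A minimal sketch with made-up keys:

import org.apache.hadoop.conf.Configuration;

public class FallbackDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setDouble("task.error.tolerate", 0.1); // hypothetical key
    // "group.failed.tolerate" is unset, so the inner lookup supplies the default.
    double tolerate = conf.getDouble("group.failed.tolerate",
        conf.getDouble("task.error.tolerate", 0.0));
    System.out.println(tolerate); // 0.1
  }
}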
Example 5: getRegionReplicaStoreFileRefreshMultiplier
import org.apache.hadoop.conf.Configuration; // the package/class this method depends on

public static double getRegionReplicaStoreFileRefreshMultiplier(Configuration conf) {
  return conf.getDouble(REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER,
      DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER);
}
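Example 5 wraps getDouble in a small static accessor so the key and its default live in one place and callers never repeat them. A generic sketch of the same idiom, with invented names standing in for the HBase constants:

import org.apache.hadoop.conf.Configuration;

public final class RefreshConfig {
  // Hypothetical key/default mirroring the constants referenced above.
  static final String MULTIPLIER_KEY = "demo.region.replica.refresh.multiplier";
  static final double MULTIPLIER_DEFAULT = 4.0;

  private RefreshConfig() {}

  public static double getRefreshMultiplier(Configuration conf) {
    return conf.getDouble(MULTIPLIER_KEY, MULTIPLIER_DEFAULT);
  }
}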