This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getDouble. If you are wondering what Configuration.getDouble does, or how to call it in your own code, the curated samples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.
Five code examples of the Configuration.getDouble method are shown below, sorted by popularity by default.
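Before diving into the examples, here is a minimal, self-contained sketch of the method's contract: getDouble(name, defaultValue) parses the named property as a double and returns the supplied default when the key is absent. The property key below is made up purely for illustration.

import org.apache.hadoop.conf.Configuration;

public class GetDoubleDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Hypothetical key, not set anywhere: the default (0.25) comes back.
    double ratio = conf.getDouble("demo.sample.ratio", 0.25);

    // After an explicit setDouble, the stored value is parsed and returned.
    conf.setDouble("demo.sample.ratio", 0.75);
    double updated = conf.getDouble("demo.sample.ratio", 0.25);

    System.out.println(ratio + " -> " + updated);  // prints: 0.25 -> 0.75
  }
}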
Example 1: super

import org.apache.hadoop.conf.Configuration; // import the class the method depends on

public DefaultSpeculator(Configuration conf, AppContext context,
    TaskRuntimeEstimator estimator, Clock clock) {
  super(DefaultSpeculator.class.getName());

  this.conf = conf;
  this.context = context;
  this.estimator = estimator;
  this.clock = clock;
  this.eventHandler = context.getEventHandler();
  this.soonestRetryAfterNoSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE,
          MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_NO_SPECULATE);
  this.soonestRetryAfterSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE,
          MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_SPECULATE);
  // The two speculation caps are fractions of the task population,
  // hence they are read with getDouble rather than getInt.
  this.proportionRunningTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS,
          MRJobConfig.DEFAULT_SPECULATIVECAP_RUNNING_TASKS);
  this.proportionTotalTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS,
          MRJobConfig.DEFAULT_SPECULATIVECAP_TOTAL_TASKS);
  this.minimumAllowedSpeculativeTasks =
      conf.getInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS,
          MRJobConfig.DEFAULT_SPECULATIVE_MINIMUM_ALLOWED_TASKS);
}
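A hedged sketch of overriding these caps before job submission: the constant names come from MRJobConfig as used above, but the values 0.05 and 0.02 are purely illustrative, not recommendations.

Configuration conf = new Configuration();
// Allow at most 5% of running tasks and 2% of all tasks to be
// speculated; illustrative values only.
conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.05);
conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.02);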
Example 2: parseDecayFactor

import org.apache.hadoop.conf.Configuration; // import the class the method depends on

private static double parseDecayFactor(String ns, Configuration conf) {
  // Read the namespaced decay factor, falling back to the scheduler default.
  double factor = conf.getDouble(ns + "." +
      IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,
      IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT);
  if (factor <= 0 || factor >= 1) {
    throw new IllegalArgumentException("Decay Factor " +
        "must be between 0 and 1");
  }
  return factor;
}
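The read-then-validate shape above generalizes to any bounded double setting: getDouble performs no range checking of its own, so the caller must reject out-of-range values. A minimal sketch of the idiom as a reusable helper (the helper name is hypothetical):

// Hypothetical helper: read a double and enforce an exclusive (0, 1) range.
static double getBoundedDouble(Configuration conf, String key, double dflt) {
  double v = conf.getDouble(key, dflt);
  if (v <= 0 || v >= 1) {
    throw new IllegalArgumentException(
        key + " must be strictly between 0 and 1, but was " + v);
  }
  return v;
}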
Example 3: configureForRegion

import org.apache.hadoop.conf.Configuration; // import the class the method depends on

@Override
protected void configureForRegion(HRegion region) {
  super.configureForRegion(region);
  Configuration conf = getConf();
  HTableDescriptor desc = region.getTableDesc();
  if (desc != null) {
    this.desiredMaxFileSize = desc.getMaxFileSize();
  }
  if (this.desiredMaxFileSize <= 0) {
    this.desiredMaxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
        HConstants.DEFAULT_MAX_FILE_SIZE);
  }
  // Randomize the split threshold by up to +/- jitter/2 so that regions
  // created together do not all hit the split size at the same moment.
  double jitter = conf.getDouble("hbase.hregion.max.filesize.jitter", 0.25D);
  this.desiredMaxFileSize += (long) (desiredMaxFileSize * (RANDOM.nextFloat() - 0.5D) * jitter);
}
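Since RANDOM.nextFloat() - 0.5D lies in [-0.5, 0.5), the default jitter of 0.25 shifts the threshold by at most 12.5% in either direction. A quick sanity check of the arithmetic with HBase's 10 GB default maximum file size (variable names below are only for the calculation):

long base = 10L * 1024 * 1024 * 1024;               // default hbase.hregion.max.filesize
double jitter = 0.25;
long low  = base + (long) (base * -0.5 * jitter);   // ~8.75 GB, lowest possible threshold
long high = base + (long) (base *  0.5 * jitter);   // ~11.25 GB, highest (exclusive)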
Example 4: WorkerManager

import org.apache.hadoop.conf.Configuration; // import the class the method depends on

public WorkerManager(AMContext context) {
  this.context = context;
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  readLock = readWriteLock.readLock();
  writeLock = readWriteLock.writeLock();

  Configuration conf = context.getConf();
  workersInGroup =
      conf.getInt(AngelConf.ANGEL_WORKERGROUP_WORKER_NUMBER,
          AngelConf.DEFAULT_ANGEL_WORKERGROUP_WORKER_NUMBER);
  taskNumberInEachWorker =
      conf.getInt(AngelConf.ANGEL_WORKER_TASK_NUMBER,
          AngelConf.DEFAULT_ANGEL_WORKER_TASK_NUMBER);
  // Layered defaults: prefer the workergroup-level key, then the
  // task-level key, then the compile-time default.
  tolerateFailedGroup =
      conf.getDouble(AngelConf.ANGEL_WORKERGROUP_FAILED_TOLERATE, conf.getDouble(
          AngelConf.ANGEL_TASK_ERROR_TOLERATE,
          AngelConf.DEFAULT_ANGEL_TASK_ERROR_TOLERATE));

  int workerMemory =
      conf.getInt(AngelConf.ANGEL_WORKER_MEMORY_GB,
          AngelConf.DEFAULT_ANGEL_WORKER_MEMORY_GB) * 1024;
  int workerVcores =
      conf.getInt(AngelConf.ANGEL_WORKER_CPU_VCORES,
          AngelConf.DEFAULT_ANGEL_WORKER_CPU_VCORES);
  int workerPriority =
      conf.getInt(AngelConf.ANGEL_WORKER_PRIORITY,
          AngelConf.DEFAULT_ANGEL_WORKER_PRIORITY);
  workerResource = Resource.newInstance(workerMemory, workerVcores);
  PRIORITY_WORKER =
      RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
  PRIORITY_WORKER.setPriority(workerPriority);

  workerGroupMap = new HashMap<WorkerGroupId, AMWorkerGroup>();
  findWorkerGroupMap = new HashMap<WorkerId, AMWorkerGroup>();
  workersMap = new HashMap<WorkerId, AMWorker>();
  taskIdToWorkerMap = new HashMap<TaskId, AMWorker>();
  successGroups = new HashSet<WorkerGroupId>();
  killedGroups = new HashSet<WorkerGroupId>();
  failedGroups = new HashSet<WorkerGroupId>();
}
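Note the layered lookup for tolerateFailedGroup: the inner getDouble supplies the fallback for the outer one, so a value under the workergroup-level key takes precedence over the task-level key, which in turn falls back to the compile-time default. The idiom in isolation (both key strings here are hypothetical):

// The first key that is actually set wins; otherwise the literal default applies.
double tolerance = conf.getDouble("primary.tolerance.key",
    conf.getDouble("legacy.tolerance.key", 0.1));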
Example 5: getRegionReplicaStoreFileRefreshMultiplier

import org.apache.hadoop.conf.Configuration; // import the class the method depends on

public static double getRegionReplicaStoreFileRefreshMultiplier(Configuration conf) {
  return conf.getDouble(REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER,
      DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER);
}
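Wrapping the lookup in a small static getter like this keeps the key and its default in one place, so callers never touch the raw configuration strings. A hedged usage sketch (memstoreFlushSize is an assumed local variable; judging by the constant name, the multiplier is applied against the memstore flush size):

double multiplier = getRegionReplicaStoreFileRefreshMultiplier(conf);
long refreshThreshold = (long) (memstoreFlushSize * multiplier);  // illustrative use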