Java Configuration.getDouble Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getDouble. If you are unsure what Configuration.getDouble does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


The following presents 5 code examples of the Configuration.getDouble method, sorted by popularity by default.
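Before the examples, here is a minimal, self-contained sketch of the basic getDouble behavior: it parses the named property as a double and falls back to the supplied default when the property is unset. The key demo.sampling.ratio is invented purely for illustration.

import org.apache.hadoop.conf.Configuration;

public class GetDoubleDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // The property has not been set, so the supplied default (0.1) is returned.
    double beforeSet = conf.getDouble("demo.sampling.ratio", 0.1);

    // After setting the property, the stored value (0.75) is parsed and returned.
    conf.setDouble("demo.sampling.ratio", 0.75);
    double afterSet = conf.getDouble("demo.sampling.ratio", 0.1);

    System.out.println(beforeSet + " -> " + afterSet); // 0.1 -> 0.75
  }
}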

Example 1: DefaultSpeculator

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public DefaultSpeculator
    (Configuration conf, AppContext context,
     TaskRuntimeEstimator estimator, Clock clock) {
  super(DefaultSpeculator.class.getName());

  this.conf = conf;
  this.context = context;
  this.estimator = estimator;
  this.clock = clock;
  this.eventHandler = context.getEventHandler();
  this.soonestRetryAfterNoSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE,
              MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_NO_SPECULATE);
  this.soonestRetryAfterSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE,
              MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_SPECULATE);
  this.proportionRunningTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS,
              MRJobConfig.DEFAULT_SPECULATIVECAP_RUNNING_TASKS);
  this.proportionTotalTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS,
              MRJobConfig.DEFAULT_SPECULATIVECAP_TOTAL_TASKS);
  this.minimumAllowedSpeculativeTasks =
      conf.getInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS,
              MRJobConfig.DEFAULT_SPECULATIVE_MINIMUM_ALLOWED_TASKS);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: DefaultSpeculator.java
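As a follow-up, a hedged sketch of how the speculation caps read above could be overridden before job submission. It reuses the MRJobConfig constants that appear in the example, so it assumes a Hadoop version that exposes them; the 0.05 and 0.01 values are illustrative only, not recommended settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class SpeculationCapDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Cap speculative execution at 5% of running tasks and 1% of total tasks.
    conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.05);
    conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.01);

    // DefaultSpeculator would now read 0.05 instead of the shipped default.
    double runningCap = conf.getDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS,
        MRJobConfig.DEFAULT_SPECULATIVECAP_RUNNING_TASKS);
    System.out.println("running-task speculation cap: " + runningCap);
  }
}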

Example 2: parseDecayFactor

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private static double parseDecayFactor(String ns, Configuration conf) {
  double factor = conf.getDouble(ns + "." +
      IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,
    IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT
  );

  if (factor <= 0 || factor >= 1) {
    throw new IllegalArgumentException("Decay Factor " +
      "must be between 0 and 1");
  }

  return factor;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 14, Source: DecayRpcScheduler.java

Example 3: configureForRegion

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
protected void configureForRegion(HRegion region) {
  super.configureForRegion(region);
  Configuration conf = getConf();
  HTableDescriptor desc = region.getTableDesc();
  if (desc != null) {
    this.desiredMaxFileSize = desc.getMaxFileSize();
  }
  if (this.desiredMaxFileSize <= 0) {
    this.desiredMaxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
      HConstants.DEFAULT_MAX_FILE_SIZE);
  }
  double jitter = conf.getDouble("hbase.hregion.max.filesize.jitter", 0.25D);
  this.desiredMaxFileSize += (long)(desiredMaxFileSize * (RANDOM.nextFloat() - 0.5D) * jitter);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 16, Source: ConstantSizeRegionSplitPolicy.java
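The jitter logic above can be exercised on its own with a plain Configuration. The sketch below reuses the "hbase.hregion.max.filesize.jitter" key from the example but assumes a hypothetical 10 GB base split size, so it only illustrates the arithmetic, not the HBase split policy itself.

import java.util.Random;
import org.apache.hadoop.conf.Configuration;

public class SplitSizeJitterDemo {
  private static final Random RANDOM = new Random();

  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Assumed base size of 10 GB; the real policy derives this from the table
    // descriptor or the configured region max file size.
    long desiredMaxFileSize = 10L * 1024 * 1024 * 1024;

    // Shift the split threshold by up to +/- (jitter / 2) of its own size.
    double jitter = conf.getDouble("hbase.hregion.max.filesize.jitter", 0.25D);
    desiredMaxFileSize += (long) (desiredMaxFileSize * (RANDOM.nextFloat() - 0.5D) * jitter);

    System.out.println("Jittered split size: " + desiredMaxFileSize);
  }
}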

Example 4: WorkerManager

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public WorkerManager(AMContext context) {
  this.context = context;
  
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  readLock = readWriteLock.readLock();
  writeLock = readWriteLock.writeLock();

  Configuration conf = context.getConf();
  workersInGroup =
      conf.getInt(AngelConf.ANGEL_WORKERGROUP_WORKER_NUMBER,
          AngelConf.DEFAULT_ANGEL_WORKERGROUP_WORKER_NUMBER);

  taskNumberInEachWorker =
      conf.getInt(AngelConf.ANGEL_WORKER_TASK_NUMBER,
          AngelConf.DEFAULT_ANGEL_WORKER_TASK_NUMBER);

  tolerateFailedGroup =
      conf.getDouble(AngelConf.ANGEL_WORKERGROUP_FAILED_TOLERATE, conf.getDouble(
          AngelConf.ANGEL_TASK_ERROR_TOLERATE,
          AngelConf.DEFAULT_ANGEL_TASK_ERROR_TOLERATE));

  int workerMemory =
      conf.getInt(AngelConf.ANGEL_WORKER_MEMORY_GB,
          AngelConf.DEFAULT_ANGEL_WORKER_MEMORY_GB) * 1024;
  int workerVcores =
      conf.getInt(AngelConf.ANGEL_WORKER_CPU_VCORES,
          AngelConf.DEFAULT_ANGEL_WORKER_CPU_VCORES);

  int workerPriority =
      conf.getInt(AngelConf.ANGEL_WORKER_PRIORITY,
          AngelConf.DEFAULT_ANGEL_WORKER_PRIORITY);

  workerResource = Resource.newInstance(workerMemory, workerVcores);
  PRIORITY_WORKER =
      RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
  PRIORITY_WORKER.setPriority(workerPriority);

  workerGroupMap = new HashMap<WorkerGroupId, AMWorkerGroup>();
  findWorkerGroupMap = new HashMap<WorkerId, AMWorkerGroup>();
  workersMap = new HashMap<WorkerId, AMWorker>();
  taskIdToWorkerMap = new HashMap<TaskId, AMWorker>();
  successGroups = new HashSet<WorkerGroupId>();
  killedGroups = new HashSet<WorkerGroupId>();
  failedGroups = new HashSet<WorkerGroupId>();
}
 
Developer ID: Tencent, Project: angel, Lines of code: 46, Source: WorkerManager.java
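Note the chained-default idiom used for tolerateFailedGroup above: the default passed to the outer getDouble is itself another getDouble call, so a missing key falls back to a second key before falling back to a constant. A minimal sketch of that pattern, with the keys demo.group.tolerance and demo.task.tolerance invented for illustration:

import org.apache.hadoop.conf.Configuration;

public class ChainedDefaultDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setDouble("demo.task.tolerance", 0.1);

    // "demo.group.tolerance" is unset, so the outer call uses its default,
    // which is the value of "demo.task.tolerance" (0.1); if that were also
    // unset, the constant 0.0 would be used.
    double tolerance = conf.getDouble("demo.group.tolerance",
        conf.getDouble("demo.task.tolerance", 0.0));

    System.out.println(tolerance); // 0.1
  }
}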

Example 5: getRegionReplicaStoreFileRefreshMultiplier

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public static double getRegionReplicaStoreFileRefreshMultiplier(Configuration conf) {
  return conf.getDouble(REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER,
    DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 5, Source: ServerRegionReplicaUtil.java


Note: The org.apache.hadoop.conf.Configuration.getDouble method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; when distributing or using the code, please refer to the corresponding project's license. Please do not reproduce this article without permission.