

Java Configuration.getFloat Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getFloat. If you are wondering how Configuration.getFloat works, how to call it, or where to find examples of its use, the curated method examples below may help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.conf.Configuration.


The following shows 15 code examples of the Configuration.getFloat method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
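
Before diving into the examples, here is a minimal self-contained sketch of the method's basic contract (the key names below are invented purely for illustration): getFloat(name, defaultValue) parses the named property as a float and returns the supplied default when the property is unset.

import org.apache.hadoop.conf.Configuration;

public class GetFloatDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setFloat("my.sample.ratio", 0.75f);                 // hypothetical key
    float ratio = conf.getFloat("my.sample.ratio", 0.5f);    // property set -> 0.75
    float fallback = conf.getFloat("my.other.ratio", 0.5f);  // unset key -> default 0.5
    System.out.println(ratio + " " + fallback);
  }
}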

Example 1: initialize

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
public void initialize(Configuration conf, ResourceUsageMetrics metrics,
                       ResourceCalculatorPlugin monitor,
                       Progressive progress) {
  this.monitor = monitor;
  this.progress = progress;
  
  // get the target CPU usage
  targetCpuUsage = metrics.getCumulativeCpuUsage();
  if (targetCpuUsage <= 0) {
    enabled = false;
    return;
  } else {
    enabled = true;
  }
  
  emulationInterval =  conf.getFloat(CPU_EMULATION_PROGRESS_INTERVAL, 
                                     DEFAULT_EMULATION_FREQUENCY);
  
  // calibrate the core cpu-usage utility
  emulatorCore.calibrate(monitor, targetCpuUsage);
  
  // initialize the states
  lastSeenProgress = 0;
  lastSeenCpuUsage = 0;
}
 
Developer ID: naver, Project: hadoop, Line count: 27, Source file: CumulativeCpuUsageEmulatorPlugin.java

Example 2: RegionMonitor

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp,
    Sink sink, ExecutorService executor, boolean writeSniffing, TableName writeTableName,
    boolean treatFailureAsError) {
  super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError);
  Configuration conf = connection.getConfiguration();
  this.writeSniffing = writeSniffing;
  this.writeTableName = writeTableName;
  this.writeDataTTL =
      conf.getInt(HConstants.HBASE_CANARY_WRITE_DATA_TTL_KEY, DEFAULT_WRITE_DATA_TTL);
  this.regionsLowerLimit =
      conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY, 1.0f);
  this.regionsUpperLimit =
      conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_UPPERLIMIT_KEY, 1.5f);
  this.checkPeriod =
      conf.getInt(HConstants.HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY,
        DEFAULT_WRITE_TABLE_CHECK_PERIOD);
}
 
Developer ID: fengchen8086, Project: ditb, Line count: 18, Source file: Canary.java

Example 3: CompactionConfiguration

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
CompactionConfiguration(Configuration conf, StoreConfigInformation storeConfigInfo) {
  this.conf = conf;
  this.storeConfigInfo = storeConfigInfo;

  maxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, Long.MAX_VALUE);
  offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, 
    maxCompactSize);      
  minCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY,
      storeConfigInfo.getMemstoreFlushSize());
  minFilesToCompact = Math.max(2, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY,
        /*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3)));
  maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10);
  compactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F);
  offPeakCompactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F);

  throttlePoint = conf.getLong("hbase.regionserver.thread.compaction.throttle",
        2 * maxFilesToCompact * storeConfigInfo.getMemstoreFlushSize());
  majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24*7);
  // Make it 0.5 so jitter has us fall evenly either side of when the compaction should run
  majorCompactionJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.50F);
  minLocalityToForceCompact = conf.getFloat(HBASE_HSTORE_MIN_LOCALITY_TO_SKIP_MAJOR_COMPACT, 0f);
  LOG.info(this);
}
 
Developer ID: fengchen8086, Project: ditb, Line count: 24, Source file: CompactionConfiguration.java
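
As a quick sanity check of the jitter comment in this constructor, here is a hypothetical back-of-the-envelope computation (plain Java, not HBase code; how HBase applies the jitter internally may differ): with the default 7-day period and a jitter of 0.5, the effective major compaction period spreads evenly between roughly 3.5 and 10.5 days, so compactions across stores do not all fire at once.

public class JitterSpread {
  public static void main(String[] args) {
    long period = 1000L * 60 * 60 * 24 * 7;     // default major compaction period: 7 days
    float jitter = 0.50f;                       // default hbase.hregion.majorcompaction.jitter
    long min = (long) (period * (1 - jitter));  // ~3.5 days in millis
    long max = (long) (period * (1 + jitter));  // ~10.5 days in millis
    System.out.println(min + " .. " + max);
  }
}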

Example 4: setConf

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  this.maximumStepSize = conf.getFloat(MAX_STEP_KEY, DEFAULT_MAX_STEP_VALUE);
  this.minimumStepSize = conf.getFloat(MIN_STEP_KEY, DEFAULT_MIN_STEP_VALUE);
  this.step = this.maximumStepSize;
  this.sufficientMemoryLevel = conf.getFloat(SUFFICIENT_MEMORY_LEVEL_KEY,
      DEFAULT_SUFFICIENT_MEMORY_LEVEL_VALUE);
  this.tunerLookupPeriods = conf.getInt(LOOKUP_PERIODS_KEY, DEFAULT_LOOKUP_PERIODS);
  this.blockCachePercentMinRange = conf.getFloat(BLOCK_CACHE_SIZE_MIN_RANGE_KEY,
      conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT));
  this.blockCachePercentMaxRange = conf.getFloat(BLOCK_CACHE_SIZE_MAX_RANGE_KEY,
      conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT));
  this.globalMemStorePercentMinRange = conf.getFloat(MEMSTORE_SIZE_MIN_RANGE_KEY,
      HeapMemorySizeUtil.getGlobalMemStorePercent(conf, false));
  this.globalMemStorePercentMaxRange = conf.getFloat(MEMSTORE_SIZE_MAX_RANGE_KEY,
      HeapMemorySizeUtil.getGlobalMemStorePercent(conf, false));
  // Default value of periods to ignore is number of lookup periods
  this.numPeriodsToIgnore = conf.getInt(NUM_PERIODS_TO_IGNORE, this.tunerLookupPeriods);
  this.rollingStatsForCacheMisses = new RollingStatCalculator(this.tunerLookupPeriods);
  this.rollingStatsForFlushes = new RollingStatCalculator(this.tunerLookupPeriods);
  this.rollingStatsForEvictions = new RollingStatCalculator(this.tunerLookupPeriods);
  this.rollingStatsForTunerSteps = new RollingStatCalculator(this.tunerLookupPeriods);
}
 
Developer ID: fengchen8086, Project: ditb, Line count: 25, Source file: DefaultHeapMemoryTuner.java

Example 5: initRetryCache

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@VisibleForTesting
static RetryCache initRetryCache(Configuration conf) {
  boolean enable = conf.getBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY,
                                   DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT);
  LOG.info("Retry cache on namenode is " + (enable ? "enabled" : "disabled"));
  if (enable) {
    float heapPercent = conf.getFloat(
        DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY,
        DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT);
    long entryExpiryMillis = conf.getLong(
        DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY,
        DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT);
    LOG.info("Retry cache will use " + heapPercent
        + " of total heap and retry cache entry expiry time is "
        + entryExpiryMillis + " millis");
    long entryExpiryNanos = entryExpiryMillis * 1000 * 1000;
    return new RetryCache("NameNodeRetryCache", heapPercent,
        entryExpiryNanos);
  }
  return null;
}
 
Developer ID: naver, Project: hadoop, Line count: 22, Source file: FSNamesystem.java

Example 6: setConf

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
public synchronized void setConf(Configuration conf) {
  balancedSpaceThreshold = conf.getLong(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY,
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT);
  balancedPreferencePercent = conf.getFloat(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT);
  
  LOG.info("Available space volume choosing policy initialized: " +
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY +
      " = " + balancedSpaceThreshold + ", " +
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
      " = " + balancedPreferencePercent);

  if (balancedPreferencePercent > 1.0) {
    LOG.warn("The value of " + DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
             " is greater than 1.0 but should be in the range 0.0 - 1.0");
  }

  if (balancedPreferencePercent < 0.5) {
    LOG.warn("The value of " + DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
             " is less than 0.5 so volumes with less available disk space will receive more block allocations");
  }
}
 
Developer ID: naver, Project: hadoop, Line count: 26, Source file: AvailableSpaceVolumeChoosingPolicy.java

Example 7: initialize

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
public void initialize(Configuration conf, FileSystem fs) {
  this.fs = fs;
  this.deletionInterval = (long)(conf.getFloat(
      FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
      * MSECS_PER_MINUTE);
  this.emptierInterval = (long)(conf.getFloat(
      FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
      * MSECS_PER_MINUTE);
}
 
Developer ID: nucypher, Project: hadoop-oss, Line count: 11, Source file: TrashPolicyDefault.java
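
Both intervals are configured in minutes (as floats) and converted to milliseconds, MSECS_PER_MINUTE being 60 * 1000. A small sketch of the same conversion, assuming the standard fs.trash.interval key:

import org.apache.hadoop.conf.Configuration;

public class TrashIntervalDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setFloat("fs.trash.interval", 1440f);  // 1440 minutes = 1 day
    long deletionInterval =
        (long) (conf.getFloat("fs.trash.interval", 0f) * 60 * 1000);
    System.out.println(deletionInterval);       // 86400000 ms
  }
}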

Example 8: initBloomFilter

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private synchronized void initBloomFilter(Configuration conf) {
  numKeys = conf.getInt("io.mapfile.bloom.size", 1024 * 1024);
  // vector size should be <code>-kn / (ln(1 - c^(1/k)))</code> bits for a
  // single key, where <code>k</code> is the number of hash functions,
  // <code>n</code> is the number of keys and <code>c</code> is the desired
  // max. error rate.
  // Our desired error rate is by default 0.005, i.e. 0.5%
  float errorRate = conf.getFloat("io.mapfile.bloom.error.rate", 0.005f);
  vectorSize = (int)Math.ceil((double)(-HASH_COUNT * numKeys) /
      Math.log(1.0 - Math.pow(errorRate, 1.0/HASH_COUNT)));
  bloomFilter = new DynamicBloomFilter(vectorSize, HASH_COUNT,
      Hash.getHashType(conf), numKeys);
}
 
Developer ID: naver, Project: hadoop, Line count: 14, Source file: BloomMapFile.java
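
To make the sizing formula concrete, here is a hypothetical standalone computation using the defaults above (it assumes HASH_COUNT is 5, as in Hadoop's BloomMapFile; verify against your version):

public class BloomSizingCheck {
  public static void main(String[] args) {
    int hashCount = 5;            // assumed value of BloomMapFile.HASH_COUNT
    int numKeys = 1024 * 1024;    // default io.mapfile.bloom.size
    float errorRate = 0.005f;     // default io.mapfile.bloom.error.rate
    int vectorSize = (int) Math.ceil((double) (-hashCount * numKeys)
        / Math.log(1.0 - Math.pow(errorRate, 1.0 / hashCount)));
    System.out.println(vectorSize);  // ~12.3 million bits, i.e. roughly 1.5 MB
  }
}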

Example 9: HeapMemoryManager

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@VisibleForTesting
HeapMemoryManager(ResizableBlockCache blockCache, FlushRequester memStoreFlusher,
              Server server, RegionServerAccounting regionServerAccounting) {
  Configuration conf = server.getConfiguration();
  this.blockCache = blockCache;
  this.memStoreFlusher = memStoreFlusher;
  this.server = server;
  this.regionServerAccounting = regionServerAccounting;
  this.tunerOn = doInit(conf);
  this.defaultChorePeriod = conf.getInt(HBASE_RS_HEAP_MEMORY_TUNER_PERIOD,
    HBASE_RS_HEAP_MEMORY_TUNER_DEFAULT_PERIOD);
  this.heapOccupancyLowWatermark = conf.getFloat(HConstants.HEAP_OCCUPANCY_LOW_WATERMARK_KEY,
    HConstants.DEFAULT_HEAP_OCCUPANCY_LOW_WATERMARK);
}
 
Developer ID: fengchen8086, Project: ditb, Line count: 15, Source file: HeapMemoryManager.java

Example 10: ReplicationSinkManager

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Instantiate for a single replication peer cluster.
 * @param conn connection to the peer cluster
 * @param peerClusterId identifier of the peer cluster
 * @param endpoint replication endpoint for inter cluster replication
 * @param conf HBase configuration, used for determining replication source ratio and bad peer
 *          threshold
 */
public ReplicationSinkManager(HConnection conn, String peerClusterId,
    HBaseReplicationEndpoint endpoint, Configuration conf) {
  this.conn = conn;
  this.peerClusterId = peerClusterId;
  this.endpoint = endpoint;
  this.badReportCounts = Maps.newHashMap();
  this.ratio = conf.getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO);
  this.badSinkThreshold = conf.getInt("replication.bad.sink.threshold",
                                      DEFAULT_BAD_SINK_THRESHOLD);
  this.random = new Random();
}
 
Developer ID: fengchen8086, Project: ditb, Line count: 20, Source file: ReplicationSinkManager.java

Example 11: getBestLocations

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * This computes the locations to be passed from the InputSplit. MR/Yarn schedulers do not take
 * weights into account, and thus will treat every location passed from the input split as equal. We
 * do not want to blindly pass all the locations, since we are creating one split per region, and
 * the region's blocks are all distributed throughout the cluster unless favored node assignment
 * is used. In the expected stable case, only one location will contain most of the blocks as
 * local.
 * On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. Here
 * we are doing a simple heuristic, where we will pass all hosts which have at least 80%
 * (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top
 * host with the best locality.
 */
public static List<String> getBestLocations(
    Configuration conf, HDFSBlocksDistribution blockDistribution) {
  List<String> locations = new ArrayList<String>(3);

  HostAndWeight[] hostAndWeights = blockDistribution.getTopHostsWithWeights();

  if (hostAndWeights.length == 0) {
    return locations;
  }

  HostAndWeight topHost = hostAndWeights[0];
  locations.add(topHost.getHost());

  // Heuristic: filter all hosts which have at least cutoffMultiplier % of block locality
  double cutoffMultiplier
    = conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER);

  double filterWeight = topHost.getWeight() * cutoffMultiplier;

  for (int i = 1; i < hostAndWeights.length; i++) {
    if (hostAndWeights[i].getWeight() >= filterWeight) {
      locations.add(hostAndWeights[i].getHost());
    } else {
      break;
    }
  }

  return locations;
}
 
Developer ID: fengchen8086, Project: ditb, Line count: 42, Source file: TableSnapshotInputFormatImpl.java
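
A quick numeric walk-through of the cutoff, with invented weights: getTopHostsWithWeights returns hosts sorted by descending weight, so with the default multiplier of 0.8 and a top host of weight 100, filterWeight is 80; hosts at weights 95 and 85 are appended, and the loop breaks at the first host below 80 (say, 60). Tightening the multiplier keeps fewer, more-local hosts, e.g. on the conf passed to getBestLocations:

conf.setFloat("hbase.tablesnapshotinputformat.locality.cutoff.multiplier", 0.9f);  // keep only hosts within 90% of the top host's locality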

Example 12: getL2BlockCacheHeapPercent

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * @param conf
 * @return The on heap size for L2 block cache.
 */
public static float getL2BlockCacheHeapPercent(Configuration conf) {
  float l2CachePercent = 0.0F;
  String bucketCacheIOEngineName = conf.get(HConstants.BUCKET_CACHE_IOENGINE_KEY, null);
  // L2 block cache can be on heap when IOEngine is "heap"
  if (bucketCacheIOEngineName != null && bucketCacheIOEngineName.startsWith("heap")) {
    float bucketCachePercentage = conf.getFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    l2CachePercent = bucketCachePercentage < 1 ? bucketCachePercentage
        : (bucketCachePercentage * 1024 * 1024) / mu.getMax();
  }
  return l2CachePercent;
}
 
Developer ID: fengchen8086, Project: ditb, Line count: 17, Source file: HeapMemorySizeUtil.java
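
Note the dual interpretation of hbase.bucketcache.size in the snippet above: a value below 1 is read directly as a fraction of the max heap, while a larger value is read as megabytes and divided by the max heap size. As a worked example with an invented 4 GB max heap, a setting of 0.2f yields l2CachePercent = 0.2, whereas a setting of 1024 yields 1024 MB expressed in bytes divided by the 4 GB heap, i.e. 0.25.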

Example 13: getGlobalMemStorePercent

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Retrieve global memstore configured size as percentage of total heap.
 * @param c
 * @param logInvalid
 */
public static float getGlobalMemStorePercent(final Configuration c, final boolean logInvalid) {
  float limit = c.getFloat(MEMSTORE_SIZE_KEY,
      c.getFloat(MEMSTORE_SIZE_OLD_KEY, DEFAULT_MEMSTORE_SIZE));
  if (limit > 0.8f || limit <= 0.0f) {
    if (logInvalid) {
      LOG.warn("Setting global memstore limit to default of " + DEFAULT_MEMSTORE_SIZE
          + " because supplied value outside allowed range of (0 -> 0.8]");
    }
    limit = DEFAULT_MEMSTORE_SIZE;
  }
  return limit;
}
 
Developer ID: fengchen8086, Project: ditb, Line count: 18, Source file: HeapMemorySizeUtil.java
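
A small usage sketch of the clamping behavior (the literal key string, the import path, and the 0.4 default are assumptions based on HBase 1.x's HeapMemorySizeUtil; check your version):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil;  // assumed package location

public class MemStorePercentDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.95f);
    // 0.95 falls outside (0 -> 0.8], so a warning is logged and the 0.4 default is used
    float pct = HeapMemorySizeUtil.getGlobalMemStorePercent(conf, true);
    System.out.println(pct);  // expected: 0.4
  }
}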

Example 14: CacheManager

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  float cachedBlocksPercent = conf.getFloat(
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (cachedBlocksPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    cachedBlocksPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  this.cachedBlocks = new LightWeightGSet<CachedBlock, CachedBlock>(
        LightWeightGSet.computeCapacity(cachedBlocksPercent,
            "cachedBlocks"));

}
 
Developer ID: naver, Project: hadoop, Line count: 28, Source file: CacheManager.java

Example 15: initialize

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
public void initialize(Configuration conf,  FSClusterStats stats,
                       NetworkTopology clusterMap, 
                       Host2NodesMap host2datanodeMap) {
  this.considerLoad = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);

  this.stats = stats;
  this.clusterMap = clusterMap;
  this.host2datanodeMap = host2datanodeMap;
  this.heartbeatInterval = conf.getLong(
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000;
  this.tolerateHeartbeatMultiplier = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY,
      DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT);
  this.staleInterval = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
      DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);


  this.considerDfsUsedPercentTresholdPercent = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDER_DFS_USED_PERCENT_THRESHOLD,
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDER_DFS_USED_PERCENT_THRESHOLD_DEFAULT);

  this.considerDfsUsedPercent = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDER_DFS_USED_PERCENT_KEY,
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDER_DFS_USED_PERCENT_DEFAULT);

  this.considerDfsUsedPercentFactor = conf.getFloat(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDER_DFS_USED_PERCENT_FACTOR,
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDER_DFS_USED_PERCENT_FACTOR_DEFAULT);
}
 
Developer ID: naver, Project: hadoop, Line count: 34, Source file: BlockPlacementPolicyDefault.java


Note: The org.apache.hadoop.conf.Configuration.getFloat method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.