

Java Configuration.getLong Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getLong. If you have been wondering exactly what Configuration.getLong does and how to use it, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


Fifteen code examples of Configuration.getLong are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
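Before the project examples, here is a minimal, self-contained sketch of the method itself: Configuration.getLong(name, defaultValue) reads the named property as a long and returns the supplied default when the property is absent. The property key used below is invented purely for illustration.

import org.apache.hadoop.conf.Configuration;

public class GetLongDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // "demo.timeout.ms" is a hypothetical key; it is not set yet,
    // so the default value (30000) is returned.
    long timeoutMs = conf.getLong("demo.timeout.ms", 30000L);
    System.out.println("timeout = " + timeoutMs); // prints 30000

    // Once the key is set, getLong returns the stored value instead.
    conf.setLong("demo.timeout.ms", 5000L);
    System.out.println("timeout = " + conf.getLong("demo.timeout.ms", 30000L)); // prints 5000
  }
}

As the examples below show, the typical pattern pairs a configuration key constant with a documented default, so callers get sensible behavior even on an empty Configuration.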

Example 1: setConf

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
public synchronized void setConf(Configuration conf) {
  balancedSpaceThreshold = conf.getLong(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY,
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT);
  balancedPreferencePercent = conf.getFloat(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT);
  
  LOG.info("Available space volume choosing policy initialized: " +
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY +
      " = " + balancedSpaceThreshold + ", " +
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
      " = " + balancedPreferencePercent);

  if (balancedPreferencePercent > 1.0) {
    LOG.warn("The value of " + DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
             " is greater than 1.0 but should be in the range 0.0 - 1.0");
  }

  if (balancedPreferencePercent < 0.5) {
    LOG.warn("The value of " + DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
             " is less than 0.5 so volumes with less available disk space will receive more block allocations");
  }
}
 
Developer ID: naver, Project: hadoop, Code lines: 26, Source: AvailableSpaceVolumeChoosingPolicy.java

Example 2: validateConfigs

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
protected static void validateConfigs(Configuration conf) {
  // validate max-attempts
  int globalMaxAppAttempts =
      conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  if (globalMaxAppAttempts <= 0) {
    throw new YarnRuntimeException("Invalid global max attempts configuration"
        + ", " + YarnConfiguration.RM_AM_MAX_ATTEMPTS
        + "=" + globalMaxAppAttempts + ", it should be a positive integer.");
  }

  // validate expireIntvl >= heartbeatIntvl
  long expireIntvl = conf.getLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
      YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
  long heartbeatIntvl =
      conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS,
          YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS);
  if (expireIntvl < heartbeatIntvl) {
    throw new YarnRuntimeException("Nodemanager expiry interval should be no"
        + " less than heartbeat interval, "
        + YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS + "=" + expireIntvl
        + ", " + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS + "="
        + heartbeatIntvl);
  }
}
 
Developer ID: naver, Project: hadoop, Code lines: 26, Source: ResourceManager.java

Example 3: createJob

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public Job createJob(Configuration conf) throws IOException {
  long numBytesToWritePerMap = conf.getLong(BYTES_PER_MAP, 10 * 1024);
  long totalBytesToWrite = conf.getLong(TOTAL_BYTES, numBytesToWritePerMap);
  int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap);
  if (numMaps == 0 && totalBytesToWrite > 0) {
    numMaps = 1;
    conf.setLong(BYTES_PER_MAP, totalBytesToWrite);
  }
  conf.setInt(MRJobConfig.NUM_MAPS, numMaps);

  Job job = Job.getInstance(conf);

  job.setJarByClass(RandomTextWriterJob.class);
  job.setJobName("random-text-writer");

  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);

  job.setInputFormatClass(RandomInputFormat.class);
  job.setMapperClass(RandomTextMapper.class);

  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  //FileOutputFormat.setOutputPath(job, new Path("random-output"));
  job.setNumReduceTasks(0);
  return job;
}
 
Developer ID: naver, Project: hadoop, Code lines: 27, Source: RandomTextWriterJob.java

Example 4: configure

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Setup the configuration for a constraint as to whether it is enabled and
 * its priority
 * 
 * @param conf
 *          on which to base the new configuration
 * @param enabled
 *          <tt>true</tt> if it should be run
 * @param priority
 *          relative to other constraints
 * @return a new configuration, storable in the {@link HTableDescriptor}
 */
private static Configuration configure(Configuration conf, boolean enabled,
    long priority) {
  // create the configuration to actually be stored
  // clone if possible, but otherwise just create an empty configuration
  Configuration toWrite = conf == null ? new Configuration()
      : new Configuration(conf);

  // update internal properties
  toWrite.setBooleanIfUnset(ENABLED_KEY, enabled);

  // set if unset long
  if (toWrite.getLong(PRIORITY_KEY, UNSET_PRIORITY) == UNSET_PRIORITY) {
    toWrite.setLong(PRIORITY_KEY, priority);
  }

  return toWrite;
}
 
Developer ID: fengchen8086, Project: ditb, Code lines: 30, Source: Constraints.java

Example 5: getTimeoutOnRIT

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
protected long getTimeoutOnRIT() {
  // Guess timeout.  Multiply the max number of regions on a server
  // by how long we think one region takes opening.
  Configuration conf = server.getConfiguration();
  long perRegionOpenTimeGuesstimate =
    conf.getLong("hbase.bulk.assignment.perregion.open.time", 1000);
  int maxRegionsPerServer = 1;
  for (List<HRegionInfo> regionList : bulkPlan.values()) {
    int size = regionList.size();
    if (size > maxRegionsPerServer) {
      maxRegionsPerServer = size;
    }
  }
  long timeout = perRegionOpenTimeGuesstimate * maxRegionsPerServer
    + conf.getLong("hbase.regionserver.rpc.startup.waittime", 60000)
    + conf.getLong("hbase.bulk.assignment.perregionserver.rpc.waittime",
      30000) * bulkPlan.size();
  LOG.debug("Timeout-on-RIT=" + timeout);
  return timeout;
}
 
Developer ID: fengchen8086, Project: ditb, Code lines: 22, Source: GeneralBulkAssigner.java

Example 6: createBatchPool

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private void createBatchPool(Configuration conf) {
  // Use the same config for keep alive as in HConnectionImplementation.getBatchPool();
  int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256);
  int coreThreads = conf.getInt("hbase.multihconnection.threads.core", 256);
  if (maxThreads == 0) {
    maxThreads = Runtime.getRuntime().availableProcessors() * 8;
  }
  if (coreThreads == 0) {
    coreThreads = Runtime.getRuntime().availableProcessors() * 8;
  }
  long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60);
  LinkedBlockingQueue<Runnable> workQueue =
      new LinkedBlockingQueue<Runnable>(maxThreads
          * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
            HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
  ThreadPoolExecutor tpe =
      new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue,
          Threads.newDaemonThreadFactory("MultiHConnection" + "-shared-"));
  tpe.allowCoreThreadTimeOut(true);
  this.batchPool = tpe;
}
 
Developer ID: fengchen8086, Project: ditb, Code lines: 22, Source: MultiHConnection.java

Example 7: TimeBoundedMultiThreadedReader

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public TimeBoundedMultiThreadedReader(LoadTestDataGenerator dataGen, Configuration conf,
    TableName tableName, double verifyPercent) throws IOException {
  super(dataGen, conf, tableName, verifyPercent);
  long timeoutMs = conf.getLong(
    String.format("%s.%s", TEST_NAME, GET_TIMEOUT_KEY), DEFAULT_GET_TIMEOUT);
  timeoutNano = timeoutMs * 1000000;
  LOG.info("Timeout for gets: " + timeoutMs);
  String runTimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
  this.runTime = conf.getLong(runTimeKey, -1);
  if (this.runTime <= 0) {
    throw new IllegalArgumentException("Please configure " + runTimeKey);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Code lines: 14, Source: IntegrationTestTimeBoundedRequestsWithRegionReplicas.java

Example 8: serviceInit

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
public void serviceInit(Configuration conf) throws Exception {
  nmExpireInterval =
      conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
        YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
  configuredMaximumAllocationWaitTime =
      conf.getLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
        YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS);
  createReleaseCache();
  super.serviceInit(conf);
}
 
Developer ID: naver, Project: hadoop, Code lines: 12, Source: AbstractYarnScheduler.java

Example 9: createDelegationTokenSecretManager

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Create delegation token secret manager
 */
private DelegationTokenSecretManager createDelegationTokenSecretManager(
    Configuration conf) {
  return new DelegationTokenSecretManager(conf.getLong(
      DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY,
      DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT),
      conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY,
          DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT),
      conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
          DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT),
      DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL,
      conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY,
          DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT),
      this);
}
 
Developer ID: naver, Project: hadoop, Code lines: 18, Source: FSNamesystem.java

Example 10: serviceInit

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
protected void serviceInit(Configuration conf) throws Exception {
  LOG.info("JobHistory Init");
  this.conf = conf;
  this.appID = ApplicationId.newInstance(0, 0);
  this.appAttemptID = RecordFactoryProvider.getRecordFactory(conf)
      .newRecordInstance(ApplicationAttemptId.class);

  moveThreadInterval = conf.getLong(
      JHAdminConfig.MR_HISTORY_MOVE_INTERVAL_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_INTERVAL_MS);

  hsManager = createHistoryFileManager();
  hsManager.init(conf);
  try {
    hsManager.initExisting();
  } catch (IOException e) {
    throw new YarnRuntimeException("Failed to intialize existing directories", e);
  }

  storage = createHistoryStorage();
  
  if (storage instanceof Service) {
    ((Service) storage).init(conf);
  }
  storage.setHistoryFileManager(hsManager);

  super.serviceInit(conf);
}
 
Developer ID: naver, Project: hadoop, Code lines: 30, Source: JobHistory.java

Example 11: Groups

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public Groups(Configuration conf, final Timer timer) {
  impl = 
    ReflectionUtils.newInstance(
        conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, 
                      ShellBasedUnixGroupsMapping.class, 
                      GroupMappingServiceProvider.class), 
        conf);

  cacheTimeout = 
    conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 
        CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;
  negativeCacheTimeout =
    conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,
        CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT) * 1000;
  warningDeltaMs =
    conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT);
  parseStaticMapping(conf);

  this.timer = timer;
  this.cache = CacheBuilder.newBuilder()
    .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
    .ticker(new TimerToTickerAdapter(timer))
    .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
    .build(new GroupCacheLoader());

  if(negativeCacheTimeout > 0) {
    Cache<String, Boolean> tempMap = CacheBuilder.newBuilder()
      .expireAfterWrite(negativeCacheTimeout, TimeUnit.MILLISECONDS)
      .ticker(new TimerToTickerAdapter(timer))
      .build();
    negativeCache = Collections.newSetFromMap(tempMap.asMap());
  }

  if(LOG.isDebugEnabled())
    LOG.debug("Group mapping impl=" + impl.getClass().getName() + 
        "; cacheTimeout=" + cacheTimeout + "; warningDeltaMs=" +
        warningDeltaMs);
}
 
Developer ID: nucypher, Project: hadoop-oss, Code lines: 40, Source: Groups.java

Example 12: ReplicationSourceManager

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Creates a replication manager and sets the watch on all the other registered region servers
 * @param replicationQueues the interface for manipulating replication queues
 * @param replicationPeers
 * @param replicationTracker
 * @param conf the configuration to use
 * @param server the server for this region server
 * @param fs the file system to use
 * @param logDir the directory that contains all wal directories of live RSs
 * @param oldLogDir the directory where old logs are archived
 * @param clusterId
 */
public ReplicationSourceManager(final ReplicationQueues replicationQueues,
    final ReplicationPeers replicationPeers, final ReplicationTracker replicationTracker,
    final Configuration conf, final Server server, final FileSystem fs, final Path logDir,
    final Path oldLogDir, final UUID clusterId) {
  //CopyOnWriteArrayList is thread-safe.
  //Generally, reading is more than modifying.
  this.sources = new CopyOnWriteArrayList<ReplicationSourceInterface>();
  this.replicationQueues = replicationQueues;
  this.replicationPeers = replicationPeers;
  this.replicationTracker = replicationTracker;
  this.server = server;
  this.walsById = new HashMap<String, Map<String, SortedSet<String>>>();
  this.walsByIdRecoveredQueues = new ConcurrentHashMap<String, Map<String, SortedSet<String>>>();
  this.oldsources = new CopyOnWriteArrayList<ReplicationSourceInterface>();
  this.conf = conf;
  this.fs = fs;
  this.logDir = logDir;
  this.oldLogDir = oldLogDir;
  this.sleepBeforeFailover =
      conf.getLong("replication.sleep.before.failover", 30000); // 30 seconds
  this.clusterId = clusterId;
  this.replicationTracker.registerListener(this);
  this.replicationPeers.getAllPeerIds();
  // It's preferable to failover 1 RS at a time, but with good zk servers
  // more could be processed at the same time.
  int nbWorkers = conf.getInt("replication.executor.workers", 1);
  // use a short 100ms sleep since this could be done inline with a RS startup
  // even if we fail, other region servers can take care of it
  this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers,
      100, TimeUnit.MILLISECONDS,
      new LinkedBlockingQueue<Runnable>());
  ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
  tfb.setNameFormat("ReplicationExecutor-%d");
  tfb.setDaemon(true);
  this.executor.setThreadFactory(tfb.build());
  this.rand = new Random();
  this.latestPaths = Collections.synchronizedSet(new HashSet<Path>());
}
 
Developer ID: fengchen8086, Project: ditb, Code lines: 51, Source: ReplicationSourceManager.java

Example 13: setup

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
protected void setup(Context context) 
  throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  this.reduceSleepCount =
    conf.getInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
  this.reduceSleepDuration = reduceSleepCount == 0 ? 0 : 
    conf.getLong(REDUCE_SLEEP_TIME , 100) / reduceSleepCount;
}
 
Developer ID: naver, Project: hadoop, Code lines: 9, Source: SleepJob.java

Example 14: HConnectionImplementation

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * For tests.
 */
protected HConnectionImplementation(Configuration conf) {
  this.conf = conf;
  this.connectionConfig = new ConnectionConfiguration(conf);
  this.closed = false;
  this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  this.useMetaReplicas = conf.getBoolean(HConstants.USE_META_REPLICAS,
      HConstants.DEFAULT_USE_META_REPLICAS);
  this.numTries = connectionConfig.getRetriesNumber();
  this.rpcTimeout = conf.getInt(
      HConstants.HBASE_RPC_TIMEOUT_KEY,
      HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
  if (conf.getBoolean(CLIENT_NONCES_ENABLED_KEY, true)) {
    synchronized (nonceGeneratorCreateLock) {
      if (ConnectionManager.nonceGenerator == null) {
        ConnectionManager.nonceGenerator = new PerClientRandomNonceGenerator();
      }
      this.nonceGenerator = ConnectionManager.nonceGenerator;
    }
  } else {
    this.nonceGenerator = new NoNonceGenerator();
  }
  stats = ServerStatisticTracker.create(conf);
  this.asyncProcess = createAsyncProcess(this.conf);
  this.interceptor = (new RetryingCallerInterceptorFactory(conf)).build();
  this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf, interceptor, this.stats);
  this.backoffPolicy = ClientBackoffPolicyFactory.create(conf);
  if (conf.getBoolean(CLIENT_SIDE_METRICS_ENABLED_KEY, false)) {
    this.metrics = new MetricsConnection(this);
  } else {
    this.metrics = null;
  }
  
  this.hostnamesCanChange = conf.getBoolean(RESOLVE_HOSTNAME_ON_FAIL_KEY, true);
  this.metaCache = new MetaCache(this.metrics);
}
 
Developer ID: fengchen8086, Project: ditb, Code lines: 40, Source: ConnectionManager.java

Example 15: checkMemoryUpperLimits

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@SuppressWarnings("deprecation")
private static boolean checkMemoryUpperLimits(String jobKey, String limitKey,  
                                              Configuration conf, 
                                              boolean convertLimitToMB) {
  if (conf.get(limitKey) != null) {
    long limit = conf.getLong(limitKey, JobConf.DISABLED_MEMORY_LIMIT);
    // scale only if the max memory limit is set.
    if (limit >= 0) {
      if (convertLimitToMB) {
        limit /= (1024 * 1024); //Converting to MB
      }
      
      long scaledConfigValue = 
             conf.getLong(jobKey, JobConf.DISABLED_MEMORY_LIMIT);
      
      // check now
      if (scaledConfigValue > limit) {
        throw new RuntimeException("Simulated job's configuration" 
            + " parameter '" + jobKey + "' got scaled to a value '" 
            + scaledConfigValue + "' which exceeds the upper limit of '" 
            + limit + "' defined for the simulated cluster by the key '" 
            + limitKey + "'. To disable High-Ram feature emulation, set '" 
            + GRIDMIX_HIGHRAM_EMULATION_ENABLE + "' to 'false'.");
      }
      return true;
    }
  }
  return false;
}
 
Developer ID: naver, Project: hadoop, Code lines: 30, Source: GridmixJob.java


Note: The org.apache.hadoop.conf.Configuration.getLong examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow each project's license. Please do not reproduce without permission.