

Java Configuration.getInt Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getInt. If you are wondering how exactly Configuration.getInt is used, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


The sections below present a total of 15 code examples of the Configuration.getInt method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
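
Before diving into the project examples, here is a minimal, self-contained sketch of the basic pattern they all share: Configuration.getInt(name, defaultValue) reads the named property as an int and falls back to the supplied default when the key is not set. The property name "example.pool.size" below is hypothetical and used only for illustration.

import org.apache.hadoop.conf.Configuration;

public class GetIntDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Key not set yet: getInt returns the supplied default (8).
    // "example.pool.size" is a hypothetical property name.
    int poolSize = conf.getInt("example.pool.size", 8);
    System.out.println("pool size (default) = " + poolSize);

    // After setting the property, getInt returns the configured value.
    conf.setInt("example.pool.size", 32);
    System.out.println("pool size (configured) = " + conf.getInt("example.pool.size", 8));
  }
}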

Example 1: create

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static HBaseAsyncOperation create(Configuration configuration) throws IOException {
    boolean enableAsyncMethod = configuration.getBoolean(ENABLE_ASYNC_METHOD,
            DEFAULT_ENABLE_ASYNC_METHOD);
    LOGGER.info("hbase.client.async.enable: " + enableAsyncMethod);
    if (!enableAsyncMethod) {
        return DisabledHBaseAsyncOperation.INSTANCE;
    }

    int queueSize = configuration.getInt(ASYNC_IN_QUEUE_SIZE, DEFAULT_ASYNC_IN_QUEUE_SIZE);

    if (configuration.get(ASYNC_PERIODIC_FLUSH_TIME, null) == null) {
        configuration.setInt(ASYNC_PERIODIC_FLUSH_TIME, DEFAULT_ASYNC_PERIODIC_FLUSH_TIME);
    }

    if (configuration.get(ASYNC_RETRY_COUNT, null) == null) {
        configuration.setInt(ASYNC_RETRY_COUNT, DEFAULT_ASYNC_RETRY_COUNT);
    }

    return new HBaseAsyncTemplate(configuration, queueSize);
}
 
Developer ID: fchenxi, Project: easyhbase, Lines: 21, Source: HBaseAsyncOperationFactory.java

Example 2: CryptoExtension

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public CryptoExtension(Configuration conf, 
    KeyProviderCryptoExtension keyProviderCryptoExtension) {
  this.keyProviderCryptoExtension = keyProviderCryptoExtension;
  encKeyVersionQueue =
      new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
          conf.getInt(KMS_KEY_CACHE_SIZE,
              KMS_KEY_CACHE_SIZE_DEFAULT),
          conf.getFloat(KMS_KEY_CACHE_LOW_WATERMARK,
              KMS_KEY_CACHE_LOW_WATERMARK_DEFAULT),
          conf.getInt(KMS_KEY_CACHE_EXPIRY_MS,
              KMS_KEY_CACHE_EXPIRY_DEFAULT),
          conf.getInt(KMS_KEY_CACHE_NUM_REFILL_THREADS,
              KMS_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
          SyncGenerationPolicy.LOW_WATERMARK, new EncryptedQueueRefiller()
      );
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 17, Source: EagerKeyGeneratorKeyProviderCryptoExtension.java

Example 3: CheckpointConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public CheckpointConf(Configuration conf) {
  checkpointCheckPeriod = conf.getLong(
      DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
      DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT);
      
  checkpointPeriod = conf.getLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
                                  DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
  checkpointTxnCount = conf.getLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 
                                DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
  maxRetriesOnMergeError = conf.getInt(DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY,
                                DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_DEFAULT);
  legacyOivImageDir = conf.get(DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);
  warnForDeprecatedConfigs(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: CheckpointConf.java

Example 4: RESTServlet

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @param userProvider the login user provider
 * @throws IOException
 */
RESTServlet(final Configuration conf,
    final UserProvider userProvider) throws IOException {
  this.realUser = userProvider.getCurrent().getUGI();
  this.conf = conf;
  registerCustomFilter(conf);

  int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
  int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
  connectionCache = new ConnectionCache(
    conf, userProvider, cleanInterval, maxIdleTime);
  if (supportsProxyuser()) {
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: RESTServlet.java

Example 5: serviceInit

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void serviceInit(Configuration conf) throws Exception {
  nmExpireInterval =
      conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
        YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
  configuredMaximumAllocationWaitTime =
      conf.getLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
        YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS);
  createReleaseCache();
  super.serviceInit(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: AbstractYarnScheduler.java

Example 6: map

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/** Partitions sigma into parts */
@Override
protected void map(NullWritable nw, SummationWritable sigma, final Context context
    ) throws IOException, InterruptedException {
  final Configuration conf = context.getConfiguration();
  final int nParts = conf.getInt(N_PARTS, 0);
  final Summation[] parts = sigma.getElement().partition(nParts);
  for(int i = 0; i < parts.length; ++i) {
    context.write(new IntWritable(i), new SummationWritable(parts[i]));
    LOG.info("parts[" + i + "] = " + parts[i]);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: DistSum.java

Example 7: createGeneralBloomAtWrite

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Creates a new general (Row or RowCol) Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 *
 * @param conf
 * @param cacheConf
 * @param bloomType
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when failed to create one.
 */
public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, BloomType bloomType, int maxKeys,
    HFile.Writer writer) {
  if (!isGeneralBloomEnabled(conf)) {
    LOG.trace("Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  } else if (bloomType == BloomType.NONE) {
    LOG.trace("Bloom filter is turned off for the column family");
    return null;
  }

  float err = getErrorRate(conf);

  // In case of row/column Bloom filter lookups, each lookup is an OR of two
  // separate lookups. Therefore, if each lookup's false positive rate is p,
  // the resulting false positive rate is err = 1 - (1 - p)^2, and
  // p = 1 - sqrt(1 - err).
  if (bloomType == BloomType.ROWCOL) {
    err = (float) (1 - Math.sqrt(1 - err));
  }

  int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD,
      MAX_ALLOWED_FOLD_FACTOR);

  // Do we support compound bloom filters?
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      bloomType == BloomType.ROWCOL ? KeyValue.COMPARATOR : KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 48, Source: BloomFilterFactory.java

Example 8: HeapMemStoreLAB

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public HeapMemStoreLAB(Configuration conf) {
  chunkSize = conf.getInt(CHUNK_SIZE_KEY, CHUNK_SIZE_DEFAULT);
  maxAlloc = conf.getInt(MAX_ALLOC_KEY, MAX_ALLOC_DEFAULT);
  this.chunkPool = MemStoreChunkPool.getPool(conf);

  // if we don't exclude allocations >CHUNK_SIZE, we'd infinite-loop on one!
  Preconditions.checkArgument(
    maxAlloc <= chunkSize,
    MAX_ALLOC_KEY + " must be less than " + CHUNK_SIZE_KEY);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 11, Source: HeapMemStoreLAB.java

Example 9: serviceInit

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  taskTimeOut = conf.getInt(MRJobConfig.TASK_TIMEOUT, 5 * 60 * 1000);
  taskTimeOutCheckInterval =
      conf.getInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 30 * 1000);
}
 
Developer ID: naver, Project: hadoop, Lines: 8, Source: TaskHeartbeatHandler.java

Example 10: getMaxChunksTolerable

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
private static int getMaxChunksTolerable(Configuration conf) {
  int maxChunksTolerable = conf.getInt(
      DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE,
      DistCpConstants.MAX_CHUNKS_TOLERABLE_DEFAULT);
  if (maxChunksTolerable <= 0) {
    LOG.warn(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE +
        " should be positive. Fall back to default value: "
        + DistCpConstants.MAX_CHUNKS_TOLERABLE_DEFAULT);
    maxChunksTolerable = DistCpConstants.MAX_CHUNKS_TOLERABLE_DEFAULT;
  }
  return maxChunksTolerable;
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: DynamicInputFormat.java

Example 11: AsyncProcess

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService pool,
    RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, RpcControllerFactory rpcFactory) {
  if (hc == null) {
    throw new IllegalArgumentException("HConnection cannot be null.");
  }

  this.connection = hc;
  this.pool = pool;
  this.globalErrors = useGlobalErrors ? new BatchErrors() : null;

  this.id = COUNTER.incrementAndGet();

  this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  this.timeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
      HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
  this.primaryCallTimeoutMicroseconds = conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000);

  this.maxTotalConcurrentTasks = conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
    HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);
  this.maxConcurrentTasksPerServer = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS,
        HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS);
  this.maxConcurrentTasksPerRegion = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS,
        HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS);

  this.startLogErrorsCnt =
      conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT);

  if (this.maxTotalConcurrentTasks <= 0) {
    throw new IllegalArgumentException("maxTotalConcurrentTasks=" + maxTotalConcurrentTasks);
  }
  if (this.maxConcurrentTasksPerServer <= 0) {
    throw new IllegalArgumentException("maxConcurrentTasksPerServer=" +
        maxConcurrentTasksPerServer);
  }
  if (this.maxConcurrentTasksPerRegion <= 0) {
    throw new IllegalArgumentException("maxConcurrentTasksPerRegion=" +
        maxConcurrentTasksPerRegion);
  }

  // Server tracker allows us to do faster, and yet useful (hopefully), retries.
  // However, if we are too useful, we might fail very quickly due to retry count limit.
  // To avoid this, we are going to cheat for now (see HBASE-7659), and calculate maximum
  // retry time if normal retries were used. Then we will retry until this time runs out.
  // If we keep hitting one server, the net effect will be the incremental backoff, and
  // essentially the same number of retries as planned. If we have to do faster retries,
  // we will do more retries in aggregate, but the user will be none the wiser.
  this.serverTrackerTimeout = 0;
  for (int i = 0; i < this.numTries; ++i) {
    serverTrackerTimeout += ConnectionUtils.getPauseTime(this.pause, i);
  }

  this.rpcCallerFactory = rpcCaller;
  this.rpcFactory = rpcFactory;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 58, Source: AsyncProcess.java

Example 12: FsDatasetImpl

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * An FSDataset has a directory where it loads its data files.
 */
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
    ) throws IOException {
  this.fsRunning = true;
  this.datanode = datanode;
  this.dataStorage = storage;
  this.conf = conf;
  // The number of volumes required for operation is the total number 
  // of volumes minus the number of failed volumes we can tolerate.
  final int volFailuresTolerated =
    conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
                DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
  List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
      dataLocations, storage);

  int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
  int volsFailed = volumeFailureInfos.size();
  this.validVolsRequired = volsConfigured - volFailuresTolerated;

  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new DiskErrorException("Invalid volume failure "
        + " config value: " + volFailuresTolerated);
  }
  if (volsFailed > volFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + storage.getNumStorageDirs() 
        + ", volumes configured: " + volsConfigured 
        + ", volumes failed: " + volsFailed
        + ", volume failures tolerated: " + volFailuresTolerated);
  }

  storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
  volumeMap = new ReplicaMap(this);
  ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);

  @SuppressWarnings("unchecked")
  final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
      ReflectionUtils.newInstance(conf.getClass(
          DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
          RoundRobinVolumeChoosingPolicy.class,
          VolumeChoosingPolicy.class), conf);
  volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
      blockChooserImpl);
  asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
  asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
  deletingBlock = new HashMap<String, Set<Long>>();

  for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
    addVolume(dataLocations, storage.getStorageDir(idx));
  }
  setupAsyncLazyPersistThreads();

  cacheManager = new FsDatasetCache(this);

  // Start the lazy writer once we have built the replica maps.
  lazyWriter = new Daemon(new LazyWriter(conf));
  lazyWriter.start();
  registerMBean(datanode.getDatanodeUuid());
  localFS = FileSystem.getLocal(conf);
  blockPinningEnabled = conf.getBoolean(
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}
 
Developer ID: naver, Project: hadoop, Lines: 69, Source: FsDatasetImpl.java

Example 13: startThreads

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Create each component in the pipeline and start it.
 * @param conf Configuration data, no keys specific to this context
 * @param traceIn Either a Path to the trace data or &quot;-&quot; for
 *                stdin
 * @param ioPath &lt;ioPath&gt;/input/ is the dir from which input data is
 *               read and &lt;ioPath&gt;/distributedCache/ is the gridmix
 *               distributed cache directory.
 * @param scratchDir Path into which job output is written
 * @param startFlag Semaphore for starting job trace pipeline
 */
@SuppressWarnings("unchecked")
private void startThreads(Configuration conf, String traceIn, Path ioPath,
    Path scratchDir, CountDownLatch startFlag, UserResolver userResolver)
    throws IOException {
  try {
    Path inputDir = getGridmixInputDataPath(ioPath);
    GridmixJobSubmissionPolicy policy = getJobSubmissionPolicy(conf);
    LOG.info(" Submission policy is " + policy.name());
    statistics = new Statistics(conf, policy.getPollingInterval(), startFlag);
    monitor = createJobMonitor(statistics, conf);
    int noOfSubmitterThreads = 
      (policy == GridmixJobSubmissionPolicy.SERIAL) 
      ? 1
      : Runtime.getRuntime().availableProcessors() + 1;

    int numThreads = conf.getInt(GRIDMIX_SUB_THR, noOfSubmitterThreads);
    int queueDep = conf.getInt(GRIDMIX_QUE_DEP, 5);
    submitter = createJobSubmitter(monitor, numThreads, queueDep,
                                   new FilePool(conf, inputDir), userResolver, 
                                   statistics);
    distCacheEmulator = new DistributedCacheEmulator(conf, ioPath);

    factory = createJobFactory(submitter, traceIn, scratchDir, conf, 
                               startFlag, userResolver);
    factory.jobCreator.setDistCacheEmulator(distCacheEmulator);

    if (policy == GridmixJobSubmissionPolicy.SERIAL) {
      statistics.addJobStatsListeners(factory);
    } else {
      statistics.addClusterStatsObservers(factory);
    }

    // add the gridmix run summarizer to the statistics
    statistics.addJobStatsListeners(summarizer.getExecutionSummarizer());
    statistics.addClusterStatsObservers(summarizer.getClusterSummarizer());
    
    monitor.start();
    submitter.start();
  } catch (Exception e) {
    LOG.error(" Exception at start ", e);
    throw new IOException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 55, Source: Gridmix.java

Example 14: getValue

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/** @return the value or, if it is null, return the default from conf. */
public short getValue(final Configuration conf) {
  return getValue() != null? getValue()
      : (short)conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
}
 
Developer ID: naver, Project: hadoop, Lines: 6, Source: ReplicationParam.java

Example 15: getRandomTextDataGeneratorListSize

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Get the configured random text data generator's list size.
 */
static int getRandomTextDataGeneratorListSize(Configuration conf) {
  return conf.getInt(GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE, DEFAULT_LIST_SIZE);
}
 
Developer ID: naver, Project: hadoop, Lines: 7, Source: RandomTextDataGenerator.java


Note: The org.apache.hadoop.conf.Configuration.getInt method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's license; do not republish without permission.