

Java Configuration.getBoolean Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getBoolean. If you are wondering how to use Configuration.getBoolean in Java, what it does, or what it looks like in practice, the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


Below are 15 code examples of Configuration.getBoolean, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
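
Before the examples, here is a minimal, self-contained sketch of the method itself (the property name my.feature.enabled is hypothetical, chosen only for illustration): Configuration.getBoolean(String name, boolean defaultValue) returns the named property parsed as a boolean, or the supplied default if the property is unset.

import org.apache.hadoop.conf.Configuration;

public class GetBooleanDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key not set yet: the supplied default (false) is returned.
    boolean before = conf.getBoolean("my.feature.enabled", false);
    // Set the key, then read it back; the default is now ignored.
    conf.setBoolean("my.feature.enabled", true);
    boolean after = conf.getBoolean("my.feature.enabled", false);
    System.out.println(before + " -> " + after); // false -> true
  }
}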

Example 1: createCompression

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Create a compression instance based on the user's configuration in the given
 * Configuration object.
 * @throws IOException if the specified codec is not available.
 */
static FSImageCompression createCompression(Configuration conf)
  throws IOException {
  boolean compressImage = conf.getBoolean(
    DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,
    DFSConfigKeys.DFS_IMAGE_COMPRESS_DEFAULT);

  if (!compressImage) {
    return createNoopCompression();
  }

  String codecClassName = conf.get(
    DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
    DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
  return createCompression(conf, codecClassName);
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: FSImageCompression.java
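
As a usage sketch: the method reads dfs.image.compress (via DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY) and, when true, the codec named by DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY. The snippet below, which assumes it runs in the same package (createCompression is package-private) and uses Hadoop's built-in DefaultCodec, shows how a caller would enable compression first:

Configuration conf = new Configuration();
// Turn on fsimage compression and choose the codec to use.
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
    "org.apache.hadoop.io.compress.DefaultCodec");
FSImageCompression compression = FSImageCompression.createCompression(conf);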

Example 2: DefaultMemStore

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Constructor.
 * @param conf configuration, used here to decide whether to allocate an MSLAB
 * @param c Comparator
 */
public DefaultMemStore(final Configuration conf,
                final KeyValue.KVComparator c) {
  this.conf = conf;
  this.comparator = c;
  this.cellSet = new CellSkipListSet(c);
  this.snapshot = new CellSkipListSet(c);
  timeRangeTracker = new TimeRangeTracker();
  snapshotTimeRangeTracker = new TimeRangeTracker();
  this.size = new AtomicLong(DEEP_OVERHEAD);
  this.snapshotSize = 0;
  if (conf.getBoolean(USEMSLAB_KEY, USEMSLAB_DEFAULT)) {
    String className = conf.get(MSLAB_CLASS_NAME, HeapMemStoreLAB.class.getName());
    this.allocator = ReflectionUtils.instantiateWithCustomCtor(className,
        new Class[] { Configuration.class }, new Object[] { conf });
  } else {
    this.allocator = null;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: DefaultMemStore.java

Example 3: CacheConfig

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Create a cache configuration using the specified configuration object and
 * family descriptor.
 * @param conf hbase configuration
 * @param family column family configuration
 */
public CacheConfig(Configuration conf, HColumnDescriptor family) {
  this(CacheConfig.instantiateBlockCache(conf),
      family.isBlockCacheEnabled(),
      family.isInMemory(),
      // For the following flags we enable them regardless of per-schema settings
      // if they are enabled in the global configuration.
      conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_DATA_ON_WRITE) || family.isCacheDataOnWrite(),
      conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_INDEXES_ON_WRITE) || family.isCacheIndexesOnWrite(),
      conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_BLOOMS_ON_WRITE) || family.isCacheBloomsOnWrite(),
      conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY,
          DEFAULT_EVICT_ON_CLOSE) || family.isEvictBlocksOnClose(),
      conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
      conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY,
          DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
      conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
          HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
      conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
   );
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: CacheConfig.java
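
Note the pattern: every write-path flag is ORed with the per-family schema setting, so caching can be enabled either globally in the Configuration or per column family. A hedged sketch (assuming a standard HBase 1.x setup) of enabling cache-on-write for a single family without touching the global configuration:

Configuration conf = HBaseConfiguration.create();
HColumnDescriptor family = new HColumnDescriptor("cf");
family.setCacheDataOnWrite(true); // per-schema override; the global default stays off
CacheConfig cacheConf = new CacheConfig(conf, family); // data blocks for "cf" are cached on write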

Example 4: generateDefaultJVMParameters

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
private static String generateDefaultJVMParameters(Configuration conf, ApplicationId appid,
    WorkerAttemptId workerAttemptId) {
  int workerMemSizeInMB =
      conf.getInt(AngelConf.ANGEL_WORKER_MEMORY_GB,
          AngelConf.DEFAULT_ANGEL_WORKER_MEMORY_GB) * 1024;

  if(workerMemSizeInMB < 2048) {
    workerMemSizeInMB = 2048;
  }

  boolean isUseDirect = conf.getBoolean(AngelConf.ANGEL_NETTY_MATRIXTRANSFER_CLIENT_USEDIRECTBUFFER,
    AngelConf.DEFAULT_ANGEL_NETTY_MATRIXTRANSFER_CLIENT_USEDIRECTBUFFER);
  int maxUse = workerMemSizeInMB - 512;
  int directRegionSize = 0;
  if(isUseDirect) {
    directRegionSize = (int) (maxUse * 0.3);
  } else {
    directRegionSize = (int) (maxUse * 0.2);
  }
  int heapMax = maxUse - directRegionSize;
  int youngRegionSize = (int) (heapMax * 0.4);
  int survivorRatio = 4;

  String ret =
      new StringBuilder().append(" -Xmx").append(heapMax).append("M").append(" -Xmn")
          .append(youngRegionSize).append("M").append(" -XX:MaxDirectMemorySize=")
          .append(directRegionSize).append("M").append(" -XX:SurvivorRatio=").append(survivorRatio)
          .append(" -XX:PermSize=100M -XX:MaxPermSize=200M").append(" -XX:+AggressiveOpts")
          .append(" -XX:+UseLargePages").append(" -XX:+UseConcMarkSweepGC")
          .append(" -XX:CMSInitiatingOccupancyFraction=70")
          .append(" -XX:+UseCMSInitiatingOccupancyOnly").append(" -XX:+CMSScavengeBeforeRemark")
          .append(" -XX:+UseCMSCompactAtFullCollection").append(" -verbose:gc")
          .append(" -XX:+PrintGCDateStamps").append(" -XX:+PrintGCDetails")
          .append(" -XX:+PrintCommandLineFlags").append(" -XX:+PrintTenuringDistribution")
          .append(" -XX:+PrintAdaptiveSizePolicy").append(" -Xloggc:/tmp/").append("angelgc-")
          .append(appid).append("-").append(workerAttemptId).append(".log").toString();

  return ret;
}
 
Developer ID: Tencent, Project: angel, Lines: 40, Source: WorkerJVM.java
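
To make the sizing concrete: if the worker memory resolves to 4 GB, workerMemSizeInMB = 4096 and maxUse = 3584 MB; with direct buffers enabled, directRegionSize = (int)(3584 * 0.3) = 1075 MB, heapMax = 3584 - 1075 = 2509 MB, and youngRegionSize = (int)(2509 * 0.4) = 1003 MB, so the generated flags start with -Xmx2509M -Xmn1003M -XX:MaxDirectMemorySize=1075M.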

Example 5: isNativeBzip2Loaded

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Check if native-bzip2 code is loaded & initialized correctly and 
 * can be loaded for this job.
 * 
 * @param conf configuration
 * @return <code>true</code> if native-bzip2 is loaded & initialized 
 *         and can be loaded for this job, else <code>false</code>
 */
public static boolean isNativeBzip2Loaded(Configuration conf) {
  String libname = conf.get("io.compression.codec.bzip2.library", 
                            "system-native");
  if (!bzip2LibraryName.equals(libname)) {
    nativeBzip2Loaded = false;
    bzip2LibraryName = libname;
    if (libname.equals("java-builtin")) {
      LOG.info("Using pure-Java version of bzip2 library");
    } else if (conf.getBoolean(
              CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, 
              CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_DEFAULT) &&
        NativeCodeLoader.isNativeCodeLoaded()) {
      try {
        // Initialize the native library.
        Bzip2Compressor.initSymbols(libname);
        Bzip2Decompressor.initSymbols(libname);
        nativeBzip2Loaded = true;
        LOG.info("Successfully loaded & initialized native-bzip2 library " +
                 libname);
      } catch (Throwable t) {
        LOG.warn("Failed to load/initialize native-bzip2 library " + 
                 libname + ", will use pure-Java version");
      }
    }
  }
  return nativeBzip2Loaded;
}
 
Developer ID: naver, Project: hadoop, Lines: 36, Source: Bzip2Factory.java
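
The branch taken is driven entirely by the io.compression.codec.bzip2.library key read at the top of the method. A short sketch of forcing the pure-Java implementation (key string taken verbatim from the code above):

Configuration conf = new Configuration();
// "system-native" (the default) loads libbz2 via JNI; "java-builtin" forces pure Java.
conf.set("io.compression.codec.bzip2.library", "java-builtin");
boolean usingNative = Bzip2Factory.isNativeBzip2Loaded(conf); // false: pure-Java selected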

Example 6: initialize

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/** Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
  statistics = getStatistics(name.getScheme(), getClass());    
  resolveSymlinks = conf.getBoolean(
      CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY,
      CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_DEFAULT);
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: FileSystem.java
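
A one-step sketch of disabling remote symlink resolution before the file system is constructed (key constant as used above):

Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY, false);
FileSystem fs = FileSystem.get(conf); // initialize() reads the flag during construction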

Example 7: RegionStateStore

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
RegionStateStore(final Server server) {
  Configuration conf = server.getConfiguration();
  // No need to persist if using ZK but not migrating
  noPersistence = ConfigUtil.useZKForAssignment(conf)
    && !conf.getBoolean("hbase.assignment.usezk.migrating", false);
  this.server = server;
  initialized = false;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 9, Source: RegionStateStore.java

Example 8: checkAllowFormat

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static void checkAllowFormat(Configuration conf) throws IOException {
  if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
      DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
    throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
              + " is set to false for this filesystem, so it "
              + "cannot be formatted. You will need to set "
              + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
              + "to true in order to format this filesystem");
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: NameNode.java
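
A sketch of the guard in action, assuming the standard DFSConfigKeys constants used above: with the option set to false, the format attempt is rejected with the explanatory IOException.

Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, false);
try {
  NameNode.checkAllowFormat(conf);
} catch (IOException e) {
  System.err.println(e.getMessage()); // the filesystem is protected from accidental re-format
}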

Example 9: RpcRetryingCallerFactory

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public RpcRetryingCallerFactory(Configuration conf, RetryingCallerInterceptor interceptor) {
  this.conf = conf;
  pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  startLogErrorsCnt = conf.getInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY,
      AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
  this.interceptor = interceptor;
  enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
      HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 13, Source: RpcRetryingCallerFactory.java

Example 10: addDefaultApps

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Add default apps.
 * @param parent the context handler collection the contexts are added to
 * @param appDir the application directory
 * @param conf the configuration
 * @throws IOException
 */
protected void addDefaultApps(ContextHandlerCollection parent,
    final String appDir, Configuration conf) throws IOException {
  // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
  String logDir = this.logDir;
  if (logDir == null) {
      logDir = System.getProperty("hadoop.log.dir");
  }
  if (logDir != null) {
    Context logContext = new Context(parent, "/logs");
    logContext.setResourceBase(logDir);
    logContext.addServlet(AdminAuthorizedServlet.class, "/*");
    if (conf.getBoolean(
        ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES,
        ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) {
      @SuppressWarnings("unchecked")
      Map<String, String> params = logContext.getInitParams();
      params.put(
          "org.mortbay.jetty.servlet.Default.aliases", "true");
    }
    logContext.setDisplayName("logs");
    setContextAttributes(logContext, conf);
    addNoCacheFilter(webAppContext);
    defaultContexts.put(logContext, true);
  }
  // set up the context for "/static/*"
  Context staticContext = new Context(parent, "/static");
  staticContext.setResourceBase(appDir + "/static");
  staticContext.addServlet(DefaultServlet.class, "/*");
  staticContext.setDisplayName("static");
  setContextAttributes(staticContext, conf);
  defaultContexts.put(staticContext, true);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 38, Source: HttpServer.java

Example 11: dumpConfiguration

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Dumps the configuration of the queue hierarchy from the given
 * XML file path. It is to be used directly ONLY FOR TESTING.
 * @param out the writer object to which the dump is written.
 * @param configFile the filename of the XML file
 * @throws IOException
 */
static void dumpConfiguration(Writer out, String configFile,
    Configuration conf) throws IOException {
  if (conf != null && conf.get(DeprecatedQueueConfigurationParser.
      MAPRED_QUEUE_NAMES_KEY) != null) {
    return;
  }
  
  JsonFactory dumpFactory = new JsonFactory();
  JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
  QueueConfigurationParser parser;
  boolean aclsEnabled = false;
  if (conf != null) {
    aclsEnabled = conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
  }
  if (configFile != null && !"".equals(configFile)) {
    parser = new QueueConfigurationParser(configFile, aclsEnabled);
  }
  else {
    parser = getQueueConfigurationParser(null, false, aclsEnabled);
  }
  dumpGenerator.writeStartObject();
  dumpGenerator.writeFieldName("queues");
  dumpGenerator.writeStartArray();
  dumpConfiguration(dumpGenerator, parser.getRoot().getChildren());
  dumpGenerator.writeEndArray();
  dumpGenerator.writeEndObject();
  dumpGenerator.flush();
}
 
Developer ID: naver, Project: hadoop, Lines: 36, Source: QueueManager.java

Example 12: StripeStoreConfig

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public StripeStoreConfig(Configuration config, StoreConfigInformation sci) {
  this.level0CompactMinFiles = config.getInt(MIN_FILES_L0_KEY, 4);
  this.flushIntoL0 = config.getBoolean(FLUSH_TO_L0_KEY, false);
  int minMinFiles = flushIntoL0 ? 3 : 4; // make sure not to compact tiny files too often.
  int minFiles = config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, -1);
  this.stripeCompactMinFiles = config.getInt(MIN_FILES_KEY, Math.max(minMinFiles, minFiles));
  this.stripeCompactMaxFiles = config.getInt(MAX_FILES_KEY,
      config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10));
  this.maxRegionSplitImbalance = getFloat(config, MAX_REGION_SPLIT_IMBALANCE_KEY, 1.5f, true);

  float splitPartCount = getFloat(config, SPLIT_PARTS_KEY, 2f, true);
  if (Math.abs(splitPartCount - 1.0) < EPSILON) {
    LOG.error("Split part count cannot be 1 (" + splitPartCount + "), using the default");
    splitPartCount = 2f;
  }
  this.splitPartCount = splitPartCount;
  // Arbitrary default split size - 4 times the size of one L0 compaction.
  // If we flush into L0 there's no split compaction, but for default value it is ok.
  double flushSize = sci.getMemstoreFlushSize();
  if (flushSize == 0) {
    flushSize = 128 * 1024 * 1024;
  }
  long defaultSplitSize = (long)(flushSize * getLevel0MinFiles() * 4 * splitPartCount);
  this.sizeToSplitAt = config.getLong(SIZE_TO_SPLIT_KEY, defaultSplitSize);
  int initialCount = config.getInt(INITIAL_STRIPE_COUNT_KEY, 1);
  if (initialCount == 0) {
    LOG.error("Initial stripe count is 0, using the default");
    initialCount = 1;
  }
  this.initialCount = initialCount;
  this.splitPartSize = (long)(this.sizeToSplitAt / this.splitPartCount);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: StripeStoreConfig.java

Example 13: initReconfigurable

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
private void initReconfigurable(Configuration confToLoad) {
  this.allowFallbackToSimpleAuth = confToLoad.getBoolean(FALLBACK_TO_INSECURE_CLIENT_AUTH, false);
  if (isSecurityEnabled && allowFallbackToSimpleAuth) {
    LOG.warn("********* WARNING! *********");
    LOG.warn("This server is configured to allow connections from INSECURE clients");
    LOG.warn("(" + FALLBACK_TO_INSECURE_CLIENT_AUTH + " = true).");
    LOG.warn("While this option is enabled, client identities cannot be secured, and user");
    LOG.warn("impersonation is possible!");
    LOG.warn("For secure operation, please disable SIMPLE authentication as soon as possible,");
    LOG.warn("by setting " + FALLBACK_TO_INSECURE_CLIENT_AUTH + " = false in hbase-site.xml");
    LOG.warn("****************************");
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 14, Source: RpcServer.java

Example 14: createLogHandler

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
protected LogHandler createLogHandler(Configuration conf, Context context,
    DeletionService deletionService) {
  if (conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
      YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
    return new LogAggregationService(this.dispatcher, context,
        deletionService, dirsHandler);
  } else {
    return new NonAggregatingLogHandler(this.dispatcher, deletionService,
                                        dirsHandler,
                                        context.getNMStateStore());
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: ContainerManagerImpl.java
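
The choice of handler hinges on a single boolean. A hedged sketch of selecting the aggregating path:

Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
// With the flag on, createLogHandler() wires up a LogAggregationService;
// with it off, a NonAggregatingLogHandler is returned instead.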

Example 15: isEnabled

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/** Is WebHDFS enabled in conf? */
public static boolean isEnabled(final Configuration conf, final Log log) {
  final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
      DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
  return b;
}
 
Developer ID: naver, Project: hadoop, Lines: 7, Source: WebHdfsFileSystem.java
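
A minimal check sketch (a commons-logging Log obtained via LogFactory; key constant as used above):

Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
boolean enabled = WebHdfsFileSystem.isEnabled(conf, LogFactory.getLog("demo")); // true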


Note: The org.apache.hadoop.conf.Configuration.getBoolean method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.