

Java Configuration.getBoolean Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getBoolean. If you have been wondering what exactly Configuration.getBoolean does, how to use it, or what real-world code that calls it looks like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


The following presents 15 code examples of the Configuration.getBoolean method, sorted by popularity by default.
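
As a quick orientation before the examples, here is a minimal, self-contained sketch of the method itself (the property names are hypothetical and used only for illustration). In the Hadoop versions I have checked, getBoolean(name, defaultValue) returns the default when the property is unset, and also when the stored value is anything other than the strings "true" or "false" (compared case-insensitively):

import org.apache.hadoop.conf.Configuration;

public class GetBooleanDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "example.feature.enabled" is a hypothetical key used only for this demo.
    conf.setBoolean("example.feature.enabled", true);

    boolean enabled  = conf.getBoolean("example.feature.enabled", false); // set -> true
    boolean fallback = conf.getBoolean("example.missing.flag", true);     // unset -> default (true)
    System.out.println(enabled + " " + fallback);                         // prints: true true
  }
}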

Example 1: createCompression

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
/**
 * Create a compression instance based on the user's configuration in the given
 * Configuration object.
 * @throws IOException if the specified codec is not available.
 */
static FSImageCompression createCompression(Configuration conf)
  throws IOException {
  boolean compressImage = conf.getBoolean(
    DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,
    DFSConfigKeys.DFS_IMAGE_COMPRESS_DEFAULT);

  if (!compressImage) {
    return createNoopCompression();
  }

  String codecClassName = conf.get(
    DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
    DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
  return createCompression(conf, codecClassName);
}
 
Developer: naver, Project: hadoop, Lines of code: 21, Source: FSImageCompression.java
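
A hedged usage sketch for this example: enabling image compression amounts to setting the two keys the method reads before the NameNode saves an image. The key literals in the comments are what DFSConfigKeys maps to in the Hadoop source I have seen, so verify against your version; createCompression itself is package-private, so it is normally invoked by the NameNode during checkpointing rather than by user code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);   // "dfs.image.compress", default false
conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,        // "dfs.image.compression.codec"
    "org.apache.hadoop.io.compress.GzipCodec");                // overrides the default codec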

Example 2: DefaultMemStore

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
/**
 * Constructor.
 * @param c Comparator
 */
public DefaultMemStore(final Configuration conf,
                final KeyValue.KVComparator c) {
  this.conf = conf;
  this.comparator = c;
  this.cellSet = new CellSkipListSet(c);
  this.snapshot = new CellSkipListSet(c);
  timeRangeTracker = new TimeRangeTracker();
  snapshotTimeRangeTracker = new TimeRangeTracker();
  this.size = new AtomicLong(DEEP_OVERHEAD);
  this.snapshotSize = 0;
  if (conf.getBoolean(USEMSLAB_KEY, USEMSLAB_DEFAULT)) {
    String className = conf.get(MSLAB_CLASS_NAME, HeapMemStoreLAB.class.getName());
    this.allocator = ReflectionUtils.instantiateWithCustomCtor(className,
        new Class[] { Configuration.class }, new Object[] { conf });
  } else {
    this.allocator = null;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 23, Source: DefaultMemStore.java

Example 3: CacheConfig

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
/**
 * Create a cache configuration using the specified configuration object and
 * family descriptor.
 * @param conf hbase configuration
 * @param family column family configuration
 */
public CacheConfig(Configuration conf, HColumnDescriptor family) {
  this(CacheConfig.instantiateBlockCache(conf),
      family.isBlockCacheEnabled(),
      family.isInMemory(),
      // For the following flags we enable them regardless of per-schema settings
      // if they are enabled in the global configuration.
      conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_DATA_ON_WRITE) || family.isCacheDataOnWrite(),
      conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_INDEXES_ON_WRITE) || family.isCacheIndexesOnWrite(),
      conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_BLOOMS_ON_WRITE) || family.isCacheBloomsOnWrite(),
      conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY,
          DEFAULT_EVICT_ON_CLOSE) || family.isEvictBlocksOnClose(),
      conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
      conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY,
          DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
      conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
          HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
      conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY,DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
   );
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 29, Source: CacheConfig.java

Example 4: generateDefaultJVMParameters

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
private static String generateDefaultJVMParameters(Configuration conf, ApplicationId appid,
    WorkerAttemptId workerAttemptId) {
  int workerMemSizeInMB =
      conf.getInt(AngelConf.ANGEL_WORKER_MEMORY_GB,
          AngelConf.DEFAULT_ANGEL_WORKER_MEMORY_GB) * 1024;

  if(workerMemSizeInMB < 2048) {
    workerMemSizeInMB = 2048;
  }

  boolean isUseDirect = conf.getBoolean(AngelConf.ANGEL_NETTY_MATRIXTRANSFER_CLIENT_USEDIRECTBUFFER,
    AngelConf.DEFAULT_ANGEL_NETTY_MATRIXTRANSFER_CLIENT_USEDIRECTBUFFER);
  int maxUse = workerMemSizeInMB - 512;
  int directRegionSize = 0;
  if(isUseDirect) {
    directRegionSize = (int) (maxUse * 0.3);
  } else {
    directRegionSize = (int) (maxUse * 0.2);
  }
  int heapMax = maxUse - directRegionSize;
  int youngRegionSize = (int) (heapMax * 0.4);
  int survivorRatio = 4;

  String ret =
      new StringBuilder().append(" -Xmx").append(heapMax).append("M").append(" -Xmn")
          .append(youngRegionSize).append("M").append(" -XX:MaxDirectMemorySize=")
          .append(directRegionSize).append("M").append(" -XX:SurvivorRatio=").append(survivorRatio)
          .append(" -XX:PermSize=100M -XX:MaxPermSize=200M").append(" -XX:+AggressiveOpts")
          .append(" -XX:+UseLargePages").append(" -XX:+UseConcMarkSweepGC")
          .append(" -XX:CMSInitiatingOccupancyFraction=70")
          .append(" -XX:+UseCMSInitiatingOccupancyOnly").append(" -XX:+CMSScavengeBeforeRemark")
          .append(" -XX:+UseCMSCompactAtFullCollection").append(" -verbose:gc")
          .append(" -XX:+PrintGCDateStamps").append(" -XX:+PrintGCDetails")
          .append(" -XX:+PrintCommandLineFlags").append(" -XX:+PrintTenuringDistribution")
          .append(" -XX:+PrintAdaptiveSizePolicy").append(" -Xloggc:/tmp/").append("angelgc-")
          .append(appid).append("-").append(workerAttemptId).append(".log").toString();

  return ret;
}
 
Developer: Tencent, Project: angel, Lines of code: 40, Source: WorkerJVM.java
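
To make the sizing arithmetic concrete: assuming ANGEL_WORKER_MEMORY_GB is set to 4 and direct buffers are enabled, workerMemSizeInMB = 4 * 1024 = 4096, maxUse = 4096 - 512 = 3584, directRegionSize = (int)(3584 * 0.3) = 1075, heapMax = 3584 - 1075 = 2509, and youngRegionSize = (int)(2509 * 0.4) = 1003, so the generated flags come out to roughly -Xmx2509M -Xmn1003M -XX:MaxDirectMemorySize=1075M -XX:SurvivorRatio=4.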

Example 5: isNativeBzip2Loaded

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
/**
 * Check if native-bzip2 code is loaded & initialized correctly and 
 * can be loaded for this job.
 * 
 * @param conf configuration
 * @return <code>true</code> if native-bzip2 is loaded & initialized 
 *         and can be loaded for this job, else <code>false</code>
 */
public static boolean isNativeBzip2Loaded(Configuration conf) {
  String libname = conf.get("io.compression.codec.bzip2.library", 
                            "system-native");
  if (!bzip2LibraryName.equals(libname)) {
    nativeBzip2Loaded = false;
    bzip2LibraryName = libname;
    if (libname.equals("java-builtin")) {
      LOG.info("Using pure-Java version of bzip2 library");
    } else if (conf.getBoolean(
              CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, 
              CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_DEFAULT) &&
        NativeCodeLoader.isNativeCodeLoaded()) {
      try {
        // Initialize the native library.
        Bzip2Compressor.initSymbols(libname);
        Bzip2Decompressor.initSymbols(libname);
        nativeBzip2Loaded = true;
        LOG.info("Successfully loaded & initialized native-bzip2 library " +
                 libname);
      } catch (Throwable t) {
        LOG.warn("Failed to load/initialize native-bzip2 library " + 
                 libname + ", will use pure-Java version");
      }
    }
  }
  return nativeBzip2Loaded;
}
 
Developer: naver, Project: hadoop, Lines of code: 36, Source: Bzip2Factory.java
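
The library-selection key appears verbatim in the snippet above, so forcing the pure-Java implementation is one property away. A minimal sketch, assuming the surrounding Bzip2Factory class from the Hadoop source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.bzip2.Bzip2Factory;

Configuration conf = new Configuration();
conf.set("io.compression.codec.bzip2.library", "java-builtin"); // skip the native-library probe
boolean nativeLoaded = Bzip2Factory.isNativeBzip2Loaded(conf);  // -> false, pure-Java path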

Example 6: initialize

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
/** Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
  statistics = getStatistics(name.getScheme(), getClass());    
  resolveSymlinks = conf.getBoolean(
      CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY,
      CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_DEFAULT);
}
 
Developer: naver, Project: hadoop, Lines of code: 12, Source: FileSystem.java

Example 7: RegionStateStore

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
RegionStateStore(final Server server) {
  Configuration conf = server.getConfiguration();
  // No need to persist if using ZK but not migrating
  noPersistence = ConfigUtil.useZKForAssignment(conf)
    && !conf.getBoolean("hbase.assignment.usezk.migrating", false);
  this.server = server;
  initialized = false;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 9, Source: RegionStateStore.java

Example 8: checkAllowFormat

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
public static void checkAllowFormat(Configuration conf) throws IOException {
  if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
      DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
    throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
              + " is set to false for this filesystem, so it "
              + "cannot be formatted. You will need to set "
              + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
              + "to true in order to format this filesystem");
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 11, Source: NameNode.java
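
A hedged sketch of the guard in action (assuming the key literal is dfs.namenode.support.allow.format with a default of true, as in the Hadoop source I have seen): setting it to false makes any format attempt fail fast, a common safeguard on production NameNodes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, false);
NameNode.checkAllowFormat(conf); // throws IOException explaining how to re-enable formatting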

Example 9: RpcRetryingCallerFactory

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
public RpcRetryingCallerFactory(Configuration conf, RetryingCallerInterceptor interceptor) {
  this.conf = conf;
  pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  startLogErrorsCnt = conf.getInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY,
      AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
  this.interceptor = interceptor;
  enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
      HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 13, Source: RpcRetryingCallerFactory.java

Example 10: addDefaultApps

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
/**
 * Add default apps.
 * @param appDir The application directory
 * @throws IOException
 */
protected void addDefaultApps(ContextHandlerCollection parent,
    final String appDir, Configuration conf) throws IOException {
  // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
  String logDir = this.logDir;
  if (logDir == null) {
      logDir = System.getProperty("hadoop.log.dir");
  }
  if (logDir != null) {
    Context logContext = new Context(parent, "/logs");
    logContext.setResourceBase(logDir);
    logContext.addServlet(AdminAuthorizedServlet.class, "/*");
    if (conf.getBoolean(
        ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES,
        ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) {
      @SuppressWarnings("unchecked")
      Map<String, String> params = logContext.getInitParams();
      params.put(
          "org.mortbay.jetty.servlet.Default.aliases", "true");
    }
    logContext.setDisplayName("logs");
    setContextAttributes(logContext, conf);
    addNoCacheFilter(webAppContext);
    defaultContexts.put(logContext, true);
  }
  // set up the context for "/static/*"
  Context staticContext = new Context(parent, "/static");
  staticContext.setResourceBase(appDir + "/static");
  staticContext.addServlet(DefaultServlet.class, "/*");
  staticContext.setDisplayName("static");
  setContextAttributes(staticContext, conf);
  defaultContexts.put(staticContext, true);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 38, Source: HttpServer.java

Example 11: dumpConfiguration

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
/***
 * Dumps the configuration of hierarchy of queues with 
 * the xml file path given. It is to be used directly ONLY FOR TESTING.
 * @param out the writer object to which dump is written to.
 * @param configFile the filename of xml file
 * @throws IOException
 */
static void dumpConfiguration(Writer out, String configFile,
    Configuration conf) throws IOException {
  if (conf != null && conf.get(DeprecatedQueueConfigurationParser.
      MAPRED_QUEUE_NAMES_KEY) != null) {
    return;
  }
  
  JsonFactory dumpFactory = new JsonFactory();
  JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
  QueueConfigurationParser parser;
  boolean aclsEnabled = false;
  if (conf != null) {
    aclsEnabled = conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
  }
  if (configFile != null && !"".equals(configFile)) {
    parser = new QueueConfigurationParser(configFile, aclsEnabled);
  }
  else {
    parser = getQueueConfigurationParser(null, false, aclsEnabled);
  }
  dumpGenerator.writeStartObject();
  dumpGenerator.writeFieldName("queues");
  dumpGenerator.writeStartArray();
  dumpConfiguration(dumpGenerator,parser.getRoot().getChildren());
  dumpGenerator.writeEndArray();
  dumpGenerator.writeEndObject();
  dumpGenerator.flush();
}
 
Developer: naver, Project: hadoop, Lines of code: 36, Source: QueueManager.java

Example 12: StripeStoreConfig

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
public StripeStoreConfig(Configuration config, StoreConfigInformation sci) {
  this.level0CompactMinFiles = config.getInt(MIN_FILES_L0_KEY, 4);
  this.flushIntoL0 = config.getBoolean(FLUSH_TO_L0_KEY, false);
  int minMinFiles = flushIntoL0 ? 3 : 4; // make sure not to compact tiny files too often.
  int minFiles = config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, -1);
  this.stripeCompactMinFiles = config.getInt(MIN_FILES_KEY, Math.max(minMinFiles, minFiles));
  this.stripeCompactMaxFiles = config.getInt(MAX_FILES_KEY,
      config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10));
  this.maxRegionSplitImbalance = getFloat(config, MAX_REGION_SPLIT_IMBALANCE_KEY, 1.5f, true);

  float splitPartCount = getFloat(config, SPLIT_PARTS_KEY, 2f, true);
  if (Math.abs(splitPartCount - 1.0) < EPSILON) {
    LOG.error("Split part count cannot be 1 (" + splitPartCount + "), using the default");
    splitPartCount = 2f;
  }
  this.splitPartCount = splitPartCount;
  // Arbitrary default split size - 4 times the size of one L0 compaction.
  // If we flush into L0 there's no split compaction, but for default value it is ok.
  double flushSize = sci.getMemstoreFlushSize();
  if (flushSize == 0) {
    flushSize = 128 * 1024 * 1024;
  }
  long defaultSplitSize = (long)(flushSize * getLevel0MinFiles() * 4 * splitPartCount);
  this.sizeToSplitAt = config.getLong(SIZE_TO_SPLIT_KEY, defaultSplitSize);
  int initialCount = config.getInt(INITIAL_STRIPE_COUNT_KEY, 1);
  if (initialCount == 0) {
    LOG.error("Initial stripe count is 0, using the default");
    initialCount = 1;
  }
  this.initialCount = initialCount;
  this.splitPartSize = (long)(this.sizeToSplitAt / this.splitPartCount);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 33, Source: StripeStoreConfig.java
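
Worked through with the defaults as a sanity check: when no memstore flush size is configured, the code falls back to flushSize = 128 * 1024 * 1024 (128 MB); with getLevel0MinFiles() = 4 and splitPartCount = 2, defaultSplitSize = 128 MB * 4 * 4 * 2 = 4096 MB, so stripes split at roughly 4 GB unless SIZE_TO_SPLIT_KEY overrides it, and splitPartSize comes out to half of that.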

Example 13: initReconfigurable

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
private void initReconfigurable(Configuration confToLoad) {
  this.allowFallbackToSimpleAuth = confToLoad.getBoolean(FALLBACK_TO_INSECURE_CLIENT_AUTH, false);
  if (isSecurityEnabled && allowFallbackToSimpleAuth) {
    LOG.warn("********* WARNING! *********");
    LOG.warn("This server is configured to allow connections from INSECURE clients");
    LOG.warn("(" + FALLBACK_TO_INSECURE_CLIENT_AUTH + " = true).");
    LOG.warn("While this option is enabled, client identities cannot be secured, and user");
    LOG.warn("impersonation is possible!");
    LOG.warn("For secure operation, please disable SIMPLE authentication as soon as possible,");
    LOG.warn("by setting " + FALLBACK_TO_INSECURE_CLIENT_AUTH + " = false in hbase-site.xml");
    LOG.warn("****************************");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 14, Source: RpcServer.java

Example 14: createLogHandler

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
protected LogHandler createLogHandler(Configuration conf, Context context,
    DeletionService deletionService) {
  if (conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
      YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
    return new LogAggregationService(this.dispatcher, context,
        deletionService, dirsHandler);
  } else {
    return new NonAggregatingLogHandler(this.dispatcher, deletionService,
                                        dirsHandler,
                                        context.getNMStateStore());
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 13, Source: ContainerManagerImpl.java

Example 15: isEnabled

import org.apache.hadoop.conf.Configuration; // import the package/class that the method depends on
/** Is WebHDFS enabled in conf? */
public static boolean isEnabled(final Configuration conf, final Log log) {
  final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
      DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
  return b;
}
 
Developer: naver, Project: hadoop, Lines of code: 7, Source: WebHdfsFileSystem.java


Note: the org.apache.hadoop.conf.Configuration.getBoolean examples in this article were collected from open-source projects hosted on GitHub and similar platforms. Copyright in each snippet remains with its original authors; use and redistribution are subject to the corresponding project's license. Please do not republish without permission.