

Java Configuration.getTrimmedStrings Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getTrimmedStrings. If you are wondering what Configuration.getTrimmedStrings does, how to use it, or where to find real-world examples of it, the curated snippets below should help. You can also browse further usage examples for org.apache.hadoop.conf.Configuration itself.


The following presents 9 code examples of Configuration.getTrimmedStrings, sorted by popularity.
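
Before the examples, here is a minimal standalone sketch of what getTrimmedStrings does: the property value is split on commas and surrounding whitespace is trimmed from each token. The keys demo.hosts and demo.unset are made up for illustration.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;

public class GetTrimmedStringsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Whitespace around each comma-separated token is stripped.
    conf.set("demo.hosts", " host1.example.com , host2.example.com ,host3.example.com");
    String[] hosts = conf.getTrimmedStrings("demo.hosts");
    System.out.println(Arrays.toString(hosts));
    // -> [host1.example.com, host2.example.com, host3.example.com]

    // The vararg overload supplies a default when the key is unset;
    // with no default, an unset key yields an empty array, never null.
    String[] fallback = conf.getTrimmedStrings("demo.unset", "a", "b");
    System.out.println(Arrays.toString(fallback)); // -> [a, b]
  }
}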

Example 1: SerializationFactory

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * <p>
 * Serializations are found by reading the <code>io.serializations</code>
 * property from <code>conf</code>, which is a comma-delimited list of
 * classnames.
 * </p>
 */
public SerializationFactory(Configuration conf) {
  super(conf);
  if (conf.get(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY).equals("")) {
    LOG.warn("Serialization for various data types may not be available. Please configure "
        + CommonConfigurationKeys.IO_SERIALIZATIONS_KEY
        + " properly to have serialization support (it is currently not set).");
  } else {
    for (String serializerName : conf.getTrimmedStrings(
        CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, new String[] {
            WritableSerialization.class.getName(),
            AvroSpecificSerialization.class.getName(),
            AvroReflectSerialization.class.getName() })) {
      add(conf, serializerName);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 24, Source: SerializationFactory.java
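
As the javadoc notes, io.serializations is a comma-delimited list of class names, and the constructor reads it with getTrimmedStrings, so spaces around the commas are harmless. A hedged usage sketch (assuming the stock Hadoop serializer classes are on the classpath; the Avro class name is written out literally here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.WritableSerialization;

public class SerializationFactoryDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The space after the comma is stripped by getTrimmedStrings.
    conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
        WritableSerialization.class.getName()
            + ", org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization");
    SerializationFactory factory = new SerializationFactory(conf);
  }
}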

Example 2: TopConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public TopConf(Configuration conf) {
  isEnabled = conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
      DFSConfigKeys.NNTOP_ENABLED_DEFAULT);
  String[] periodsStr = conf.getTrimmedStrings(
      DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY,
      DFSConfigKeys.NNTOP_WINDOWS_MINUTES_DEFAULT);
  nntopReportingPeriodsMs = new int[periodsStr.length];
  for (int i = 0; i < periodsStr.length; i++) {
    nntopReportingPeriodsMs[i] = Ints.checkedCast(
        TimeUnit.MINUTES.toMillis(Integer.parseInt(periodsStr[i])));
  }
  for (int aPeriodMs: nntopReportingPeriodsMs) {
    Preconditions.checkArgument(aPeriodMs >= TimeUnit.MINUTES.toMillis(1),
        "minimum reporting period is 1 min!");
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: TopConf.java
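
A worked example of the minutes-to-milliseconds conversion above, as a standalone sketch. It assumes NNTOP_WINDOWS_MINUTES_KEY resolves to "dfs.namenode.top.windows.minutes", and it omits the Guava Ints.checkedCast overflow guard and the one-minute minimum that TopConf enforces:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;

public class WindowParseDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.namenode.top.windows.minutes", "1, 5, 25");
    for (String minutes : conf.getTrimmedStrings("dfs.namenode.top.windows.minutes")) {
      long periodMs = TimeUnit.MINUTES.toMillis(Integer.parseInt(minutes));
      System.out.println(minutes + " min -> " + periodMs + " ms");
      // 1 min -> 60000 ms, 5 min -> 300000 ms, 25 min -> 1500000 ms
    }
  }
}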

Example 3: setConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  properties = new TreeMap<String,String>();
  String[] qop = conf.getTrimmedStrings(
      CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
      QualityOfProtection.AUTHENTICATION.toString());
  for (int i=0; i < qop.length; i++) {
    qop[i] = QualityOfProtection.valueOf(
        StringUtils.toUpperCase(qop[i])).getSaslQop();
  }
  properties.put(Sasl.QOP, StringUtils.join(",", qop));
  properties.put(Sasl.SERVER_AUTH, "true");
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 15, Source: SaslPropertiesResolver.java
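
For context: the hadoop.rpc.protection values "authentication", "integrity" and "privacy" map to the SASL QOP tokens "auth", "auth-int" and "auth-conf" respectively. A small sketch of the resolver in action (assuming getDefaultProperties() is available on SaslPropertiesResolver, as in stock Hadoop):

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.SaslPropertiesResolver;

public class QopDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Two QOP levels; whitespace after the comma is trimmed by setConf.
    conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
        "authentication, privacy");
    SaslPropertiesResolver resolver = new SaslPropertiesResolver();
    resolver.setConf(conf);
    Map<String, String> props = resolver.getDefaultProperties();
    System.out.println(props); // expect javax.security.sasl.qop=auth,auth-conf
  }
}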

Example 4: refresh

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static void refresh(Configuration conf){
  Collection<String> tempServers = new HashSet<String>();
  // trusted proxy servers such as http proxies
  for (String host : conf.getTrimmedStrings(CONF_HADOOP_PROXYSERVERS)) {
    InetSocketAddress addr = new InetSocketAddress(host, 0);
    if (!addr.isUnresolved()) {
      tempServers.add(addr.getAddress().getHostAddress());
    }
  }
  proxyServers = tempServers;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 12, Source: ProxyServers.java
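
A hedged usage sketch of the refresh flow above. Entries that cannot be resolved are silently skipped, and resolvable ones are stored by IP address; the example.com hostname is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ProxyServers;

public class ProxyServersDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // 127.0.0.1 always resolves; proxy1.example.com is dropped if it does not.
    conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS,
        "127.0.0.1, proxy1.example.com");
    ProxyServers.refresh(conf);
    System.out.println(ProxyServers.isProxyServer("127.0.0.1")); // true
  }
}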

Example 5: loadSpanReceivers

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Reads the names of classes specified in the
 * "hadoop.htrace.spanreceiver.classes" property and instantiates and registers
 * them with the Tracer as SpanReceivers.
 *
 * The nullary constructor is called during construction, but if the classes
 * specified implement the Configurable interface, setConfiguration() will be
 * called on them. This allows SpanReceivers to use values from the Hadoop
 * configuration.
 */
public synchronized void loadSpanReceivers(Configuration conf) {
  config = new Configuration(conf);
  String receiverKey = confPrefix + SPAN_RECEIVERS_CONF_SUFFIX;
  String[] receiverNames = config.getTrimmedStrings(receiverKey);
  if (receiverNames == null || receiverNames.length == 0) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("No span receiver names found in " + receiverKey + ".");
    }
    return;
  }
  // It's convenient to have each daemon log to a random trace file when
  // testing.
  String pathKey = confPrefix + LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX;
  if (config.get(pathKey) == null) {
    String uniqueFile = getUniqueLocalTraceFileName();
    config.set(pathKey, uniqueFile);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Set " + pathKey + " to " + uniqueFile);
    }
  }
  for (String className : receiverNames) {
    try {
      SpanReceiver rcvr = loadInstance(className, EMPTY);
      Trace.addReceiver(rcvr);
      receivers.put(highestId++, rcvr);
      LOG.info("Loaded SpanReceiver " + className + " successfully.");
    } catch (IOException e) {
      LOG.error("Failed to load SpanReceiver", e);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 42, Source: SpanReceiverHost.java
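
The javadoc above describes a common Hadoop pattern: class names come from a configuration key read with getTrimmedStrings and are instantiated reflectively, with Configurable implementations receiving the conf. A generic, hedged sketch of that pattern (the key and class names are hypothetical; the real SpanReceiver loading goes through the host's internal loadInstance):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class PluginLoaderDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("demo.plugin.classes",
        "com.example.FooPlugin, com.example.BarPlugin");
    for (String className : conf.getTrimmedStrings("demo.plugin.classes")) {
      try {
        Class<?> cls = conf.getClassByName(className);
        // ReflectionUtils.newInstance uses the nullary constructor and,
        // if the class implements Configurable, passes it the conf.
        Object plugin = ReflectionUtils.newInstance(cls, conf);
        System.out.println("Loaded " + className);
      } catch (ClassNotFoundException e) {
        System.err.println("Failed to load " + className + ": " + e);
      }
    }
  }
}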

Example 6: SerializationFactory

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * <p>
 * Serializations are found by reading the <code>io.serializations</code>
 * property from <code>conf</code>, which is a comma-delimited list of
 * classnames.
 * </p>
 */
public SerializationFactory(Configuration conf) {
  super(conf);
  for (String serializerName : conf.getTrimmedStrings(
    CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
    new String[]{WritableSerialization.class.getName(),
      AvroSpecificSerialization.class.getName(),
      AvroReflectSerialization.class.getName()})) {
    add(conf, serializerName);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: SerializationFactory.java

Example 7: NuCypherExtClient

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
  * Create a new NuCypherExtClient connected to the given nameNodeUri or rpcNamenode.
  * If HA is enabled and a positive value is set for
  * {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY}
  * in the configuration, the DFSClient will use
  * {@link LossyRetryInvocationHandler} as its RetryInvocationHandler.
  * Otherwise one of nameNodeUri or rpcNamenode must be null.
  */
 @VisibleForTesting
 public NuCypherExtClient(URI nameNodeUri, NuCypherExtClientProtocol rpcNamenode,
                        Configuration conf, FileSystem.Statistics stats) throws IOException {
//   this.dfsClientConf = new DfsClientConf(conf);
   this.conf = conf;
   this.stats = stats;
   this.socketFactory = NetUtils.getSocketFactory(conf, NuCypherExtClientProtocol.class);

   this.ugi = UserGroupInformation.getCurrentUser();

   this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
   this.clientName = "NuCypherExtClient_" + conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE") + "_" +
       ThreadLocalRandom.current().nextInt()  + "_" +
       Thread.currentThread().getId();
   int numResponseToDrop = conf.getInt(
       DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
       DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
   ProxyAndInfo<NuCypherExtClientProtocol> proxyInfo = null;
   AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);

   if (numResponseToDrop > 0) {
     // This case is used for testing.
     LOG.warn(DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
         + " is set to " + numResponseToDrop
         + ", this hacked client will proactively drop responses");
     proxyInfo = NuCypherExtNameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
         nameNodeUri, NuCypherExtClientProtocol.class, numResponseToDrop,
         nnFallbackToSimpleAuth);
   }

   if (proxyInfo != null) {
     // this.dtService = proxyInfo.getDelegationTokenService();
     this.namenode = proxyInfo.getProxy();
   } else if (rpcNamenode != null) {
     // This case is used for testing.
     Preconditions.checkArgument(nameNodeUri == null);
     this.namenode = rpcNamenode;
     //dtService = null;
   } else {
     Preconditions.checkArgument(nameNodeUri != null,
         "null URI");
     proxyInfo = NuCypherExtNameNodeProxiesClient.createProxyWithNuCypherExtClientProtocol(conf,
         nameNodeUri, nnFallbackToSimpleAuth);
     // this.dtService = proxyInfo.getDelegationTokenService();
     this.namenode = proxyInfo.getProxy();
   }

   String localInterfaces[] =
       conf.getTrimmedStrings(DFS_CLIENT_LOCAL_INTERFACES);
   localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
   if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
     LOG.debug("Using local interfaces [" +
         Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
         Joiner.on(',').join(localInterfaceAddrs) + "]");
   }
 }
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 65, Source: NuCypherExtClient.java
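
One detail worth noting from the constructor: getTrimmedStrings(name) returns an empty array rather than null when the key is unset, which is why the length check on localInterfaces needs no null guard. A small sketch, assuming DFS_CLIENT_LOCAL_INTERFACES resolves to "dfs.client.local.interfaces" (interface names illustrative):

import org.apache.hadoop.conf.Configuration;

public class LocalInterfacesDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.client.local.interfaces", "eth0, eth1");
    String[] ifaces = conf.getTrimmedStrings("dfs.client.local.interfaces");
    System.out.println(ifaces.length); // 2
    // With the key unset, ifaces.length would be 0, never a null dereference.
  }
}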

Example 8: getSystemClasses

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@VisibleForTesting
static String[] getSystemClasses(Configuration conf) {
  return conf.getTrimmedStrings(
      MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 6, Source: MRApps.java

Example 9: FsDatasetImpl

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * An FSDataset has a directory where it loads its data files.
 */
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
    ) throws IOException {
  this.fsRunning = true;
  this.datanode = datanode;
  this.dataStorage = storage;
  this.conf = conf;
  // The number of volumes required for operation is the total number 
  // of volumes minus the number of failed volumes we can tolerate.
  final int volFailuresTolerated =
    conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
                DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
  List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
      dataLocations, storage);

  int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
  int volsFailed = volumeFailureInfos.size();
  this.validVolsRequired = volsConfigured - volFailuresTolerated;

  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new DiskErrorException("Invalid volume failure "
        + " config value: " + volFailuresTolerated);
  }
  if (volsFailed > volFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + storage.getNumStorageDirs() 
        + ", volumes configured: " + volsConfigured 
        + ", volumes failed: " + volsFailed
        + ", volume failures tolerated: " + volFailuresTolerated);
  }

  storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
  volumeMap = new ReplicaMap(this);
  ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);

  @SuppressWarnings("unchecked")
  final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
      ReflectionUtils.newInstance(conf.getClass(
          DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
          RoundRobinVolumeChoosingPolicy.class,
          VolumeChoosingPolicy.class), conf);
  volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
      blockChooserImpl);
  asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
  asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
  deletingBlock = new HashMap<String, Set<Long>>();

  for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
    addVolume(dataLocations, storage.getStorageDir(idx));
  }
  setupAsyncLazyPersistThreads();

  cacheManager = new FsDatasetCache(this);

  // Start the lazy writer once we have built the replica maps.
  lazyWriter = new Daemon(new LazyWriter(conf));
  lazyWriter.start();
  registerMBean(datanode.getDatanodeUuid());
  localFS = FileSystem.getLocal(conf);
  blockPinningEnabled = conf.getBoolean(
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 69, Source: FsDatasetImpl.java
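
A worked example of the volume-failure arithmetic in the constructor above, with illustrative numbers:

public class VolumeMathDemo {
  public static void main(String[] args) {
    // Illustrative: dfs.datanode.data.dir lists four directories and
    // dfs.datanode.failed.volumes.tolerated is 1.
    int volsConfigured = 4;
    int volFailuresTolerated = 1;
    int validVolsRequired = volsConfigured - volFailuresTolerated;
    System.out.println(validVolsRequired); // 3 volumes must stay healthy
    // FsDatasetImpl rejects volFailuresTolerated < 0 or >= volsConfigured,
    // and throws DiskErrorException once more than one volume has failed.
  }
}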


Note: the org.apache.hadoop.conf.Configuration.getTrimmedStrings examples in this article were compiled from open-source code hosted on platforms such as GitHub. Each snippet comes from an open-source project and its copyright remains with the original authors; consult the corresponding project's License before redistributing or reusing the code.