

Java Configuration.getTrimmedStrings Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getTrimmedStrings. If you are wondering what Configuration.getTrimmedStrings does, or how it is used in real code, the hand-picked method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


The sections below present 9 code examples of Configuration.getTrimmedStrings, sorted by popularity by default.
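Before the project examples, here is a minimal, self-contained sketch of the method itself: getTrimmedStrings splits a comma-delimited property value and trims the whitespace around each entry, and the vararg overload supplies a default when the key is unset. The class name GetTrimmedStringsDemo and the property names demo.servers and demo.missing.key are made up for illustration.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;

public class GetTrimmedStringsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // skip loading the default resources
    // A comma-delimited value with stray whitespace around the entries.
    conf.set("demo.servers", " host1.example.com , host2.example.com ,host3.example.com");

    // Each entry is trimmed, so the surrounding spaces disappear.
    String[] servers = conf.getTrimmedStrings("demo.servers");
    System.out.println(Arrays.toString(servers));
    // [host1.example.com, host2.example.com, host3.example.com]

    // For an unset key, the vararg default is returned as-is.
    String[] fallback = conf.getTrimmedStrings("demo.missing.key", "default1", "default2");
    System.out.println(Arrays.toString(fallback));
    // [default1, default2]
  }
}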

Example 1: SerializationFactory

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * <p>
 * Serializations are found by reading the <code>io.serializations</code>
 * property from <code>conf</code>, which is a comma-delimited list of
 * classnames.
 * </p>
 */
public SerializationFactory(Configuration conf) {
  super(conf);
  if (conf.get(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY).equals("")) {
    LOG.warn("Serialization for various data types may not be available. Please configure "
        + CommonConfigurationKeys.IO_SERIALIZATIONS_KEY
        + " properly to have serialization support (it is currently not set).");
  } else {
    for (String serializerName : conf.getTrimmedStrings(
        CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, new String[] {
            WritableSerialization.class.getName(),
            AvroSpecificSerialization.class.getName(),
            AvroReflectSerialization.class.getName() })) {
      add(conf, serializerName);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 24, Source file: SerializationFactory.java
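For reference, a hedged usage sketch of the factory above, assuming the standard Hadoop common jar and its core-default.xml are on the classpath: with io.serializations left at its defaults, asking for a serialization of a Writable type resolves to WritableSerialization. The class name SerializationFactoryDemo is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.SerializationFactory;

public class SerializationFactoryDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    SerializationFactory factory = new SerializationFactory(conf);
    // Text is a Writable, so the default io.serializations list resolves it
    // to WritableSerialization.
    Serialization<Text> serialization = factory.getSerialization(Text.class);
    System.out.println(serialization.getClass().getName());
    // org.apache.hadoop.io.serializer.WritableSerialization
  }
}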

Example 2: TopConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public TopConf(Configuration conf) {
  isEnabled = conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
      DFSConfigKeys.NNTOP_ENABLED_DEFAULT);
  String[] periodsStr = conf.getTrimmedStrings(
      DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY,
      DFSConfigKeys.NNTOP_WINDOWS_MINUTES_DEFAULT);
  nntopReportingPeriodsMs = new int[periodsStr.length];
  for (int i = 0; i < periodsStr.length; i++) {
    nntopReportingPeriodsMs[i] = Ints.checkedCast(
        TimeUnit.MINUTES.toMillis(Integer.parseInt(periodsStr[i])));
  }
  for (int aPeriodMs: nntopReportingPeriodsMs) {
    Preconditions.checkArgument(aPeriodMs >= TimeUnit.MINUTES.toMillis(1),
        "minimum reporting period is 1 min!");
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: TopConf.java
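A hedged usage sketch of the constructor above, assuming the HDFS server classes are on the classpath: each trimmed entry of the nntop windows property is parsed as minutes and converted to milliseconds. The "1, 5, 25" value below is illustrative (it mirrors the HDFS defaults); the class name TopConfDemo is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;

public class TopConfDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Whitespace around the commas is harmless because the constructor uses getTrimmedStrings.
    conf.set(DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY, "1, 5, 25");
    TopConf topConf = new TopConf(conf);
    for (int periodMs : topConf.nntopReportingPeriodsMs) {
      System.out.println(periodMs); // 60000, 300000, 1500000
    }
  }
}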

Example 3: setConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  properties = new TreeMap<String,String>();
  String[] qop = conf.getTrimmedStrings(
      CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
      QualityOfProtection.AUTHENTICATION.toString());
  for (int i=0; i < qop.length; i++) {
    qop[i] = QualityOfProtection.valueOf(
        StringUtils.toUpperCase(qop[i])).getSaslQop();
  }
  properties.put(Sasl.QOP, StringUtils.join(",", qop));
  properties.put(Sasl.SERVER_AUTH, "true");
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 15, Source file: SaslPropertiesResolver.java
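A hedged sketch of the value mapping that setConf performs, repeated standalone: each trimmed entry of hadoop.rpc.protection is upper-cased, resolved against the QualityOfProtection enum, and replaced by its SASL QOP token. The "authentication, privacy" value and the class name RpcProtectionDemo are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.apache.hadoop.util.StringUtils;

public class RpcProtectionDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Whitespace around the entries is tolerated because setConf() uses getTrimmedStrings.
    conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, "authentication, privacy");
    String[] qop = conf.getTrimmedStrings(
        CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
        QualityOfProtection.AUTHENTICATION.toString());
    for (int i = 0; i < qop.length; i++) {
      qop[i] = QualityOfProtection.valueOf(
          StringUtils.toUpperCase(qop[i])).getSaslQop();
    }
    System.out.println(StringUtils.join(",", qop)); // auth,auth-conf
  }
}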

Example 4: refresh

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static void refresh(Configuration conf){
  Collection<String> tempServers = new HashSet<String>();
  // trusted proxy servers such as http proxies
  for (String host : conf.getTrimmedStrings(CONF_HADOOP_PROXYSERVERS)) {
    InetSocketAddress addr = new InetSocketAddress(host, 0);
    if (!addr.isUnresolved()) {
      tempServers.add(addr.getAddress().getHostAddress());
    }
  }
  proxyServers = tempServers;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 12, Source file: ProxyServers.java
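A hedged usage sketch for the refresh method above: hadoop.proxyservers takes a comma-delimited host list, each resolvable host is recorded by its IP address, and isProxyServer can then be queried. The hostnames and the queried address below are placeholders, and entries that do not resolve in DNS are silently skipped.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ProxyServers;

public class ProxyServersDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Placeholder hosts; whitespace around the comma is trimmed.
    conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, "proxy1.example.com, proxy2.example.com");
    ProxyServers.refresh(conf);
    // True only if the remote address matches a resolved proxy entry.
    System.out.println(ProxyServers.isProxyServer("203.0.113.10"));
  }
}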

Example 5: loadSpanReceivers

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Reads the names of classes specified in the
 * "hadoop.htrace.spanreceiver.classes" property and instantiates and registers
 * them with the Tracer as SpanReceiver's.
 *
 * The nullary constructor is called during construction, but if the classes
 * specified implement the Configurable interface, setConfiguration() will be
 * called on them. This allows SpanReceivers to use values from the Hadoop
 * configuration.
 */
public synchronized void loadSpanReceivers(Configuration conf) {
  config = new Configuration(conf);
  String receiverKey = confPrefix + SPAN_RECEIVERS_CONF_SUFFIX;
  String[] receiverNames = config.getTrimmedStrings(receiverKey);
  if (receiverNames == null || receiverNames.length == 0) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("No span receiver names found in " + receiverKey + ".");
    }
    return;
  }
  // It's convenient to have each daemon log to a random trace file when
  // testing.
  String pathKey = confPrefix + LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX;
  if (config.get(pathKey) == null) {
    String uniqueFile = getUniqueLocalTraceFileName();
    config.set(pathKey, uniqueFile);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Set " + pathKey + " to " + uniqueFile);
    }
  }
  for (String className : receiverNames) {
    try {
      SpanReceiver rcvr = loadInstance(className, EMPTY);
      Trace.addReceiver(rcvr);
      receivers.put(highestId++, rcvr);
      LOG.info("Loaded SpanReceiver " + className + " successfully.");
    } catch (IOException e) {
      LOG.error("Failed to load SpanReceiver", e);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 42, Source file: SpanReceiverHost.java
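A hedged sketch of the early-return path in loadSpanReceivers: when the span receiver property named in the Javadoc is unset, getTrimmedStrings returns an empty array rather than null, so the method logs at trace level and returns. The key string is taken from the comment above; no receiver classes are assumed, and SpanReceiverKeyDemo is an illustrative name.

import org.apache.hadoop.conf.Configuration;

public class SpanReceiverKeyDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // no default resources, so the key is unset
    // Unset property: getTrimmedStrings returns an empty array, which triggers
    // the early return in loadSpanReceivers() without registering any receiver.
    String[] receiverNames = conf.getTrimmedStrings("hadoop.htrace.spanreceiver.classes");
    System.out.println(receiverNames.length); // 0
  }
}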

Example 6: SerializationFactory

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * <p>
 * Serializations are found by reading the <code>io.serializations</code>
 * property from <code>conf</code>, which is a comma-delimited list of
 * classnames.
 * </p>
 */
public SerializationFactory(Configuration conf) {
  super(conf);
  for (String serializerName : conf.getTrimmedStrings(
    CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
    new String[]{WritableSerialization.class.getName(),
      AvroSpecificSerialization.class.getName(),
      AvroReflectSerialization.class.getName()})) {
    add(conf, serializerName);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: SerializationFactory.java

Example 7: NuCypherExtClient

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
  * Create a new NuCypherExtClient connected to the given nameNodeUri or rpcNamenode.
  * If HA is enabled and a positive value is set for
  * {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY}
  * in the configuration, the DFSClient will use
  * {@link LossyRetryInvocationHandler} as its RetryInvocationHandler.
  * Otherwise one of nameNodeUri or rpcNamenode must be null.
  */
 @VisibleForTesting
 public NuCypherExtClient(URI nameNodeUri, NuCypherExtClientProtocol rpcNamenode,
                        Configuration conf, FileSystem.Statistics stats) throws IOException {
//   this.dfsClientConf = new DfsClientConf(conf);
   this.conf = conf;
   this.stats = stats;
   this.socketFactory = NetUtils.getSocketFactory(conf, NuCypherExtClientProtocol.class);

   this.ugi = UserGroupInformation.getCurrentUser();

   this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
   this.clientName = "NuCypherExtClient_" + conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE") + "_" +
       ThreadLocalRandom.current().nextInt()  + "_" +
       Thread.currentThread().getId();
   int numResponseToDrop = conf.getInt(
       DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
       DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
   ProxyAndInfo<NuCypherExtClientProtocol> proxyInfo = null;
   AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);

   if (numResponseToDrop > 0) {
     // This case is used for testing.
     LOG.warn(DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
         + " is set to " + numResponseToDrop
         + ", this hacked client will proactively drop responses");
     proxyInfo = NuCypherExtNameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
         nameNodeUri, NuCypherExtClientProtocol.class, numResponseToDrop,
         nnFallbackToSimpleAuth);
   }

   if (proxyInfo != null) {
     // this.dtService = proxyInfo.getDelegationTokenService();
     this.namenode = proxyInfo.getProxy();
   } else if (rpcNamenode != null) {
     // This case is used for testing.
     Preconditions.checkArgument(nameNodeUri == null);
     this.namenode = rpcNamenode;
     //dtService = null;
   } else {
     Preconditions.checkArgument(nameNodeUri != null,
         "null URI");
     proxyInfo = NuCypherExtNameNodeProxiesClient.createProxyWithNuCypherExtClientProtocol(conf,
         nameNodeUri, nnFallbackToSimpleAuth);
     // this.dtService = proxyInfo.getDelegationTokenService();
     this.namenode = proxyInfo.getProxy();
   }

   String localInterfaces[] =
       conf.getTrimmedStrings(DFS_CLIENT_LOCAL_INTERFACES);
   localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
   if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
     LOG.debug("Using local interfaces [" +
         Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
         Joiner.on(',').join(localInterfaceAddrs) + "]");
   }
 }
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 65, Source file: NuCypherExtClient.java
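A hedged sketch of the local-interfaces handling at the end of the constructor above: DFS_CLIENT_LOCAL_INTERFACES is the standard HDFS client key dfs.client.local.interfaces, and when it is unset getTrimmedStrings yields a zero-length array, so the debug-logging branch is skipped. The interface names below and the class name LocalInterfacesDemo are placeholders.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;

public class LocalInterfacesDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Unset key: an empty array, not null.
    System.out.println(conf.getTrimmedStrings("dfs.client.local.interfaces").length); // 0

    // Placeholder interface names; whitespace around the comma is trimmed.
    conf.set("dfs.client.local.interfaces", "eth0, eth1");
    System.out.println(Arrays.toString(conf.getTrimmedStrings("dfs.client.local.interfaces")));
    // [eth0, eth1]
  }
}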

Example 8: getSystemClasses

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@VisibleForTesting
static String[] getSystemClasses(Configuration conf) {
  return conf.getTrimmedStrings(
      MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 6, Source file: MRApps.java
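A hedged sketch of what getSystemClasses returns: it is a thin wrapper over getTrimmedStrings on the constant used above (mapreduce.job.classloader.system.classes), so whitespace around the commas is removed and an unset key yields an empty array. The prefix list and the class name SystemClassesDemo are illustrative; the real default list is much longer.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class SystemClassesDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Illustrative prefixes only.
    conf.set(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES,
        "java., javax., org.apache.hadoop.");
    String[] systemClasses = conf.getTrimmedStrings(
        MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES);
    System.out.println(Arrays.toString(systemClasses));
    // [java., javax., org.apache.hadoop.]
  }
}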

Example 9: FsDatasetImpl

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * An FSDataset has a directory where it loads its data files.
 */
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
    ) throws IOException {
  this.fsRunning = true;
  this.datanode = datanode;
  this.dataStorage = storage;
  this.conf = conf;
  // The number of volumes required for operation is the total number 
  // of volumes minus the number of failed volumes we can tolerate.
  final int volFailuresTolerated =
    conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
                DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
  List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
      dataLocations, storage);

  int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
  int volsFailed = volumeFailureInfos.size();
  this.validVolsRequired = volsConfigured - volFailuresTolerated;

  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new DiskErrorException("Invalid volume failure "
        + " config value: " + volFailuresTolerated);
  }
  if (volsFailed > volFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + storage.getNumStorageDirs() 
        + ", volumes configured: " + volsConfigured 
        + ", volumes failed: " + volsFailed
        + ", volume failures tolerated: " + volFailuresTolerated);
  }

  storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
  volumeMap = new ReplicaMap(this);
  ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);

  @SuppressWarnings("unchecked")
  final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
      ReflectionUtils.newInstance(conf.getClass(
          DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
          RoundRobinVolumeChoosingPolicy.class,
          VolumeChoosingPolicy.class), conf);
  volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
      blockChooserImpl);
  asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
  asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
  deletingBlock = new HashMap<String, Set<Long>>();

  for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
    addVolume(dataLocations, storage.getStorageDir(idx));
  }
  setupAsyncLazyPersistThreads();

  cacheManager = new FsDatasetCache(this);

  // Start the lazy writer once we have built the replica maps.
  lazyWriter = new Daemon(new LazyWriter(conf));
  lazyWriter.start();
  registerMBean(datanode.getDatanodeUuid());
  localFS = FileSystem.getLocal(conf);
  blockPinningEnabled = conf.getBoolean(
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 69, Source file: FsDatasetImpl.java
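A hedged sketch of the volume counting at the top of the constructor above: dfs.datanode.data.dir is read with getTrimmedStrings, so each directory entry is trimmed before the configured-volume count is compared against the failure tolerance. The paths and the class name DataDirsDemo are placeholders.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DataDirsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Placeholder directories; whitespace around the commas is trimmed.
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
        "/data/1/dfs/dn, /data/2/dfs/dn, /data/3/dfs/dn");
    String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
    System.out.println(volsConfigured + " " + Arrays.toString(dataDirs));
    // 3 [/data/1/dfs/dn, /data/2/dfs/dn, /data/3/dfs/dn]
  }
}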


Note: The org.apache.hadoop.conf.Configuration.getTrimmedStrings method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not republish without permission.