

Java Configuration.getTrimmed Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.getTrimmed. If you are wondering what Configuration.getTrimmed does, how to call it, or what it looks like in practice, the curated examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.conf.Configuration.


Below are 15 code examples of the Configuration.getTrimmed method, sorted by popularity by default.
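
As a quick orientation before the examples: getTrimmed returns the configured value with leading and trailing whitespace stripped, null for unset keys, and an overload accepts a default. A minimal, self-contained sketch (the key names here are made up for illustration):

import org.apache.hadoop.conf.Configuration;

public class GetTrimmedDemo {
  public static void main(String[] args) {
    // Pass false so no default resources (core-site.xml etc.) are loaded.
    Configuration conf = new Configuration(false);
    conf.set("demo.key", "  some-value  ");

    // get() returns the raw value, whitespace included.
    System.out.println("[" + conf.get("demo.key") + "]");        // [  some-value  ]
    // getTrimmed() strips the leading/trailing whitespace.
    System.out.println("[" + conf.getTrimmed("demo.key") + "]"); // [some-value]

    // Unset keys: null without a default, otherwise the supplied default.
    System.out.println(conf.getTrimmed("demo.missing"));             // null
    System.out.println(conf.getTrimmed("demo.missing", "fallback")); // fallback
  }
}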

Example 1: TestEndpoint

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Test if a custom endpoint is picked up.
 * <p/>
 * The test expects TEST_ENDPOINT to be defined in the Configuration,
 * describing the endpoint of the bucket to which TEST_FS_S3A_NAME points
 * (e.g. "s3-eu-west-1.amazonaws.com" if the bucket is located in Ireland).
 * Evidently, the bucket has to be hosted in the region denoted by the
 * endpoint for the test to succeed.
 * <p/>
 * More info and the list of endpoint identifiers:
 * http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
 *
 * @throws Exception on any failure
 */
@Test
public void TestEndpoint() throws Exception {
  conf = new Configuration();
  String endpoint = conf.getTrimmed(TEST_ENDPOINT, "");
  if (endpoint.isEmpty()) {
    LOG.warn("Custom endpoint test skipped as " + TEST_ENDPOINT + "config " +
        "setting was not detected");
  } else {
    conf.set(Constants.ENDPOINT, endpoint);
    fs = S3ATestUtils.createTestFileSystem(conf);
    AmazonS3Client s3 = fs.getAmazonS3Client();
    String endPointRegion = "";
    // Differentiate handling of "s3-" and "s3." based endpoint identifiers
    String[] endpointParts = StringUtils.split(endpoint, '.');
    if (endpointParts.length == 3) {
      endPointRegion = endpointParts[0].substring(3);
    } else if (endpointParts.length == 4) {
      endPointRegion = endpointParts[1];
    } else {
      fail("Unexpected endpoint");
    }
    assertEquals("Endpoint config setting and bucket location differ: ",
        endPointRegion, s3.getBucketLocation(fs.getUri().getHost()));
  }
}
 
Developer: naver | Project: hadoop | Lines: 40 | Source: TestS3AConfiguration.java
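
The region parsing in this test distinguishes "s3-REGION.amazonaws.com" endpoints from "s3.REGION.amazonaws.com" ones. A standalone sketch of the same logic, using plain String.split instead of Hadoop's StringUtils:

public class EndpointRegionDemo {
  /** Mirrors the test: "s3-REGION.amazonaws.com" has 3 dot-separated parts,
      "s3.REGION.amazonaws.com" has 4. */
  static String regionOf(String endpoint) {
    String[] parts = endpoint.split("\\.");
    if (parts.length == 3) {
      return parts[0].substring(3); // drop the leading "s3-"
    } else if (parts.length == 4) {
      return parts[1];
    }
    throw new IllegalArgumentException("Unexpected endpoint: " + endpoint);
  }

  public static void main(String[] args) {
    System.out.println(regionOf("s3-eu-west-1.amazonaws.com")); // eu-west-1
    System.out.println(regionOf("s3.eu-west-1.amazonaws.com")); // eu-west-1
  }
}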

Example 2: getWebAppBindURL

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Get the URL to use for binding, where a bind hostname can be specified
 * to override the hostname in webAppURLWithoutScheme. The port specified
 * in webAppURLWithoutScheme will be used.
 *
 * @param conf the configuration
 * @param hostProperty bind host property name
 * @param webAppURLWithoutScheme web app URL without scheme String
 * @return String representing bind URL
 */
public static String getWebAppBindURL(
    Configuration conf,
    String hostProperty,
    String webAppURLWithoutScheme) {

  // If the bind-host setting exists then it overrides the hostname
  // portion of the corresponding webAppURLWithoutScheme
  String host = conf.getTrimmed(hostProperty);
  if (host != null && !host.isEmpty()) {
    if (webAppURLWithoutScheme.contains(":")) {
      webAppURLWithoutScheme = host + ":" + webAppURLWithoutScheme.split(":")[1];
    }
    else {
      throw new YarnRuntimeException("webAppURLWithoutScheme must include port specification but doesn't: " +
                                     webAppURLWithoutScheme);
    }
  }

  return webAppURLWithoutScheme;
}
 
Developer: naver | Project: hadoop | Lines: 31 | Source: WebAppUtils.java
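
A usage sketch for the helper above. The bind-host property name is invented for illustration (YARN's real keys follow the yarn.resourcemanager.bind-host pattern), and WebAppUtils is assumed to come from hadoop-yarn-common:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

public class BindUrlDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Hypothetical bind-host property, set to listen on all interfaces.
    conf.set("example.webapp.bind-host", "0.0.0.0");

    // The host portion is replaced; the port from the original URL is kept.
    String url = WebAppUtils.getWebAppBindURL(
        conf, "example.webapp.bind-host", "rm.example.com:8088");
    System.out.println(url); // 0.0.0.0:8088
  }
}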

Example 3: serviceInit

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Init the service.
 * This is where the security bindings are set up
 * @param conf configuration of the service
 * @throws Exception
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {

  registryRoot = conf.getTrimmed(KEY_REGISTRY_ZK_ROOT,
      DEFAULT_ZK_REGISTRY_ROOT);

  // create and add the registry service
  registrySecurity = new RegistrySecurity("registry security");
  addService(registrySecurity);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Creating Registry with root {}", registryRoot);
  }

  super.serviceInit(conf);
}
 
Developer: naver | Project: hadoop | Lines: 23 | Source: CuratorService.java

Example 4: serviceInit

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Init the service: this sets up security based on the configuration
 * @param conf configuration
 * @throws Exception
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  String auth = conf.getTrimmed(KEY_REGISTRY_CLIENT_AUTH,
      REGISTRY_CLIENT_AUTH_ANONYMOUS);

  switch (auth) {
  case REGISTRY_CLIENT_AUTH_KERBEROS:
    access = AccessPolicy.sasl;
    break;
  case REGISTRY_CLIENT_AUTH_DIGEST:
    access = AccessPolicy.digest;
    break;
  case REGISTRY_CLIENT_AUTH_ANONYMOUS:
    access = AccessPolicy.anon;
    break;
  default:
    throw new ServiceStateException(E_UNKNOWN_AUTHENTICATION_MECHANISM
                                    + "\"" + auth + "\"");
  }
  initSecurity();
}
 
Developer: naver | Project: hadoop | Lines: 28 | Source: RegistrySecurity.java
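
The switch above is a common pattern: read a trimmed token, map it onto an enum, and fail fast on anything unrecognized. A self-contained sketch of the same pattern (the enum, key name, and token values here are stand-ins, not the registry's actual constants):

import org.apache.hadoop.conf.Configuration;

public class AuthDispatchDemo {
  enum AccessPolicy { SASL, DIGEST, ANON }

  static AccessPolicy policyFor(Configuration conf) {
    // Trimming matters: "  kerberos " in a hand-edited XML file should still match.
    String auth = conf.getTrimmed("example.registry.client.auth", "anonymous");
    switch (auth) {
      case "kerberos":  return AccessPolicy.SASL;
      case "digest":    return AccessPolicy.DIGEST;
      case "anonymous": return AccessPolicy.ANON;
      default:
        throw new IllegalArgumentException("Unknown auth mechanism \"" + auth + "\"");
    }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("example.registry.client.auth", " kerberos ");
    System.out.println(policyFor(conf)); // SASL
  }
}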

Example 5: setupSecurity

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Set up security. This must be done prior to creating
 * the ZK instance, as it sets up JAAS if that has not been done already.
 *
 * @return true if the cluster has security enabled.
 */
public boolean setupSecurity() throws IOException {
  Configuration conf = getConfig();
  String jaasContext = conf.getTrimmed(KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT);
  secureServer = StringUtils.isNotEmpty(jaasContext);
  if (secureServer) {
    RegistrySecurity.validateContext(jaasContext);
    RegistrySecurity.bindZKToServerJAASContext(jaasContext);
    // policy on failed auth
    System.setProperty(PROP_ZK_ALLOW_FAILED_SASL_CLIENTS,
      conf.get(KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS,
          "true"));

    // needed so that you can use sasl: strings in the registry
    System.setProperty(RegistryInternalConstants.ZOOKEEPER_AUTH_PROVIDER + ".1",
        RegistryInternalConstants.SASLAUTHENTICATION_PROVIDER);
    String serverContext =
        System.getProperty(PROP_ZK_SERVER_SASL_CONTEXT);
    addDiagnostics("Server JAAS context = %s", serverContext);
    return true;
  } else {
    return false;
  }
}
 
Developer: naver | Project: hadoop | Lines: 30 | Source: MicroZookeeperService.java
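
Note how security here is toggled by the mere presence of a non-empty trimmed value rather than by a separate boolean key. A minimal sketch of that pattern (the key name is invented):

import org.apache.hadoop.conf.Configuration;

public class PresenceFlagDemo {
  static boolean isSecure(Configuration conf) {
    String jaasContext = conf.getTrimmed("example.zk.jaas.context");
    return jaasContext != null && !jaasContext.isEmpty();
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    System.out.println(isSecure(conf)); // false: key unset, security stays off

    conf.set("example.zk.jaas.context", "ZookeeperServer");
    System.out.println(isSecure(conf)); // true: any non-blank value enables it
  }
}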

Example 6: checkAndSetRMRPCAddress

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private static void checkAndSetRMRPCAddress(String prefix, String RMId,
    Configuration conf) {
  String rpcAddressConfKey = null;
  try {
    rpcAddressConfKey = addSuffix(prefix, RMId);
    if (conf.getTrimmed(rpcAddressConfKey) == null) {
      String hostNameConfKey = addSuffix(YarnConfiguration.RM_HOSTNAME, RMId);
      String confVal = conf.getTrimmed(hostNameConfKey);
      if (confVal == null) {
        throwBadConfigurationException(getNeedToSetValueMessage(
            hostNameConfKey + " or " + addSuffix(prefix, RMId)));
      } else {
        conf.set(addSuffix(prefix, RMId), confVal + ":"
            + YarnConfiguration.getRMDefaultPortNumber(prefix, conf));
      }
    }
  } catch (IllegalArgumentException iae) {
    String errmsg = iae.getMessage();
    if (rpcAddressConfKey == null) {
      // Error at addSuffix
      errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID, RMId);
    }
    throwBadConfigurationException(errmsg);
  }
}
 
Developer: naver | Project: hadoop | Lines: 26 | Source: HAUtil.java

Example 7: getNameNodeId

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Get the namenode Id by matching the {@code addressKey}
 * with the address of the local node.
 * 
 * If {@link DFSConfigKeys#DFS_HA_NAMENODE_ID_KEY} is not specifically
 * configured, this method determines the namenode Id by matching the local
 * node's address with the configured addresses. When a match is found, it
 * returns the namenode Id from the corresponding configuration key.
 * 
 * @param conf Configuration
 * @param nsId nameservice Id
 * @return namenode Id on success, null on failure.
 * @throws HadoopIllegalArgumentException on error
 */
public static String getNameNodeId(Configuration conf, String nsId) {
  String namenodeId = conf.getTrimmed(DFS_HA_NAMENODE_ID_KEY);
  if (namenodeId != null) {
    return namenodeId;
  }
  
  String[] suffixes = DFSUtil.getSuffixIDs(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
      nsId, null, DFSUtil.LOCAL_ADDRESS_MATCHER);
  if (suffixes == null) {
    String msg = "Configuration " + DFS_NAMENODE_RPC_ADDRESS_KEY + 
        " must be suffixed with nameservice and namenode ID for HA " +
        "configuration.";
    throw new HadoopIllegalArgumentException(msg);
  }
  
  return suffixes[1];
}
 
Developer: naver | Project: hadoop | Lines: 31 | Source: HAUtil.java

Example 8: createTestFileSystem

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public static S3AFileSystem createTestFileSystem(Configuration conf) throws
    IOException {
  String fsname = conf.getTrimmed(TestS3AFileSystemContract.TEST_FS_S3A_NAME, "");

  boolean liveTest = !StringUtils.isEmpty(fsname);
  URI testURI = null;
  if (liveTest) {
    testURI = URI.create(fsname);
    liveTest = testURI.getScheme().equals(Constants.FS_S3A);
  }
  if (!liveTest) {
    // This doesn't work with our JUnit 3 style test cases, so instead we'll
    // make this whole class not run by default
    throw new AssumptionViolatedException(
        "No test filesystem in " + TestS3AFileSystemContract.TEST_FS_S3A_NAME);
  }
  S3AFileSystem fs1 = new S3AFileSystem();
  //enable purging in tests
  conf.setBoolean(Constants.PURGE_EXISTING_MULTIPART, true);
  conf.setInt(Constants.PURGE_EXISTING_MULTIPART_AGE, 0);
  fs1.initialize(testURI, conf);
  return fs1;
}
 
Developer: naver | Project: hadoop | Lines: 25 | Source: S3ATestUtils.java
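
For this helper to return a live filesystem instead of skipping, the configuration must name the target bucket. A sketch, assuming "test.fs.s3a.name" is the value of TEST_FS_S3A_NAME and that S3ATestUtils is on the test classpath; the bucket URI is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.S3ATestUtils;

public class S3ATestSetupDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder bucket; in real test runs this usually comes from an
    // auth-keys.xml resource rather than being set in code.
    conf.set("test.fs.s3a.name", "s3a://my-test-bucket/");

    S3AFileSystem fs = S3ATestUtils.createTestFileSystem(conf);
    System.out.println(fs.getUri());
  }
}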

Example 9: getRMHAId

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * @param conf Configuration. Please use verifyAndSetRMHAId to check.
 * @return RM Id on success
 */
public static String getRMHAId(Configuration conf) {
  int found = 0;
  String currentRMId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
  if (currentRMId == null) {
    for (String rmId : getRMHAIds(conf)) {
      String key = addSuffix(YarnConfiguration.RM_ADDRESS, rmId);
      String addr = conf.get(key);
      if (addr == null) {
        continue;
      }
      InetSocketAddress s;
      try {
        s = NetUtils.createSocketAddr(addr);
      } catch (Exception e) {
        LOG.warn("Exception in creating socket address " + addr, e);
        continue;
      }
      if (!s.isUnresolved() && NetUtils.isLocalAddress(s.getAddress())) {
        currentRMId = rmId.trim();
        found++;
      }
    }
  }
  if (found > 1) { // Only one address must match the local address
    String msg = "The HA Configuration has multiple addresses that match "
        + "local node's address.";
    throw new HadoopIllegalArgumentException(msg);
  }
  return currentRMId;
}
 
Developer: naver | Project: hadoop | Lines: 35 | Source: HAUtil.java
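
Because the address-matching loop only runs when the Id key is unset, explicitly pinning yarn.resourcemanager.ha.id is the simplest way to make getRMHAId deterministic. A sketch using the plain key strings; note how getTrimmed makes stray whitespace in the value harmless:

import org.apache.hadoop.conf.Configuration;

public class RmHaIdDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("yarn.resourcemanager.ha.rm-ids", "rm1,rm2");
    // Pinning the Id short-circuits the local-address matching entirely.
    conf.set("yarn.resourcemanager.ha.id", " rm1 ");

    System.out.println(conf.getTrimmed("yarn.resourcemanager.ha.id")); // rm1
  }
}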

Example 10: getConfValueForRMInstance

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public static String getConfValueForRMInstance(String prefix,
                                               Configuration conf) {
  String confKey = getConfKeyForRMInstance(prefix, conf);
  String retVal = conf.getTrimmed(confKey);
  if (LOG.isTraceEnabled()) {
    LOG.trace("getConfValueForRMInstance: prefix = " + prefix +
        "; confKey being looked up = " + confKey +
        "; value being set to = " + retVal);
  }
  return retVal;
}
 
Developer: naver | Project: hadoop | Lines: 12 | Source: HAUtil.java

Example 11: getServiceRpcServerAddress

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Override
protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
  String addr = conf.getTrimmed(BN_SERVICE_RPC_ADDRESS_KEY);
  if (addr == null || addr.isEmpty()) {
    return null;
  }
  return NetUtils.createSocketAddr(addr);
}
 
Developer: naver | Project: hadoop | Lines: 9 | Source: BackupNode.java

Example 12: getServiceAddress

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Fetches the address for services to use when connecting to the namenode,
 * based on the value of {@code fallback}: returns null if the dedicated
 * service address is not specified, or falls back to the default namenode
 * address used by both clients and services.
 * Services here are datanodes, the backup node, and any non-client connection.
 */
public static InetSocketAddress getServiceAddress(Configuration conf,
                                                      boolean fallback) {
  String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
  if (addr == null || addr.isEmpty()) {
    return fallback ? getAddress(conf) : null;
  }
  return getAddress(addr);
}
 
Developer: naver | Project: hadoop | Lines: 16 | Source: NameNode.java
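
The fallback flag implements a common two-key lookup: prefer the dedicated service address, optionally fall back to the shared client address. A self-contained sketch of the same pattern, using the HDFS key strings directly instead of the DFSConfigKeys constants:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

public class ServiceAddressDemo {
  static InetSocketAddress serviceAddress(Configuration conf, boolean fallback) {
    String addr = conf.getTrimmed("dfs.namenode.servicerpc-address");
    if (addr == null || addr.isEmpty()) {
      // No dedicated service address: optionally fall back to the client one.
      String clientAddr = conf.getTrimmed("dfs.namenode.rpc-address");
      return (fallback && clientAddr != null)
          ? NetUtils.createSocketAddr(clientAddr) : null;
    }
    return NetUtils.createSocketAddr(addr);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("dfs.namenode.rpc-address", "nn.example.com:8020");
    System.out.println(serviceAddress(conf, true));  // falls back to the client address
    System.out.println(serviceAddress(conf, false)); // null: no fallback requested
  }
}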

Example 13: getTrimmedOrNull

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Gets a trimmed value from configuration, or null if no value is defined.
 *
 * @param conf configuration
 * @param key configuration key to get
 * @return trimmed value, or null if no value is defined
 */
private static String getTrimmedOrNull(Configuration conf, String key) {
  String addr = conf.getTrimmed(key);
  if (addr == null || addr.isEmpty()) {
    return null;
  }
  return addr;
}
 
Developer: naver | Project: hadoop | Lines: 15 | Source: NameNode.java
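
The helper collapses "key unset" and "key set to blanks" into a single null result. A quick sketch of the corner case it guards against: getTrimmed on a whitespace-only value returns the empty string, not null, so callers would otherwise need two separate checks:

import org.apache.hadoop.conf.Configuration;

public class TrimmedOrNullDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("example.addr", "   "); // whitespace only, e.g. a sloppy XML edit

    // getTrimmed returns the empty string here, not null...
    System.out.println("[" + conf.getTrimmed("example.addr") + "]"); // []

    // ...so a caller needs both checks, which getTrimmedOrNull folds into one.
    String addr = conf.getTrimmed("example.addr");
    System.out.println((addr == null || addr.isEmpty()) ? "treated as unset" : addr);
  }
}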

Example 14: getHostnameForSpnegoPrincipal

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private static String getHostnameForSpnegoPrincipal(Configuration conf) {
  String addr = conf.getTrimmed(DFS_DATANODE_HTTP_ADDRESS_KEY, null);
  if (addr == null) {
    addr = conf.getTrimmed(DFS_DATANODE_HTTPS_ADDRESS_KEY,
                           DFS_DATANODE_HTTPS_ADDRESS_DEFAULT);
  }
  InetSocketAddress inetSocketAddr = NetUtils.createSocketAddr(addr);
  return inetSocketAddr.getHostString();
}
 
Developer: naver | Project: hadoop | Lines: 10 | Source: DatanodeHttpServer.java

Example 15: getDomainPeerServer

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
static DomainPeerServer getDomainPeerServer(Configuration conf,
    int port) throws IOException {
  String domainSocketPath =
      conf.getTrimmed(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
  if (domainSocketPath.isEmpty()) {
    if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT) &&
       (!conf.getBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
        DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
      LOG.warn("Although short-circuit local reads are configured, " +
          "they are disabled because you didn't configure " +
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
    }
    return null;
  }
  if (DomainSocket.getLoadingFailureReason() != null) {
    throw new RuntimeException("Although a UNIX domain socket " +
        "path is configured as " + domainSocketPath + ", we cannot " +
        "start a localDataXceiverServer because " +
        DomainSocket.getLoadingFailureReason());
  }
  DomainPeerServer domainPeerServer =
    new DomainPeerServer(domainSocketPath, port);
  domainPeerServer.setReceiveBufferSize(
      HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  return domainPeerServer;
}
 
Developer: naver | Project: hadoop | Lines: 29 | Source: DataNode.java
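
The warning branch above fires when short-circuit reads are enabled but no domain socket path is configured. A minimal configuration sketch using the standard HDFS keys (the socket path value is illustrative):

import org.apache.hadoop.conf.Configuration;

public class ShortCircuitConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Both settings are needed for the DataNode to start a domain socket
    // server; short-circuit alone, without the path, triggers the warning.
    conf.setBoolean("dfs.client.read.shortcircuit", true);
    conf.set("dfs.domain.socket.path", "/var/lib/hadoop-hdfs/dn_socket");

    System.out.println(conf.getTrimmed("dfs.domain.socket.path"));
  }
}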


Note: The org.apache.hadoop.conf.Configuration.getTrimmed examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects and copyright remains with their original authors; consult the corresponding project's License before using or redistributing the code. Please do not reproduce this page without permission.