

Java Configuration.get Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.get. If you are wondering what Configuration.get does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


The following presents 15 code examples of Configuration.get, sorted by popularity by default.
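
Before diving into the examples, here is a minimal, self-contained sketch of the two most common Configuration.get call shapes. The key names are invented purely for illustration:

import org.apache.hadoop.conf.Configuration;

public class ConfigurationGetDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("demo.example.key", "value-from-code");

    // get(name) returns null when the key is not set anywhere.
    String unset = conf.get("demo.missing.key");                   // null
    // get(name, defaultValue) falls back to the supplied default.
    String withDefault = conf.get("demo.missing.key", "fallback"); // "fallback"
    // Programmatic set() overrides values loaded from resource files.
    String fromCode = conf.get("demo.example.key");                // "value-from-code"

    System.out.println(unset + ", " + withDefault + ", " + fromCode);
  }
}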

Example 1: serviceInit

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
@Override
public void serviceInit(Configuration conf) throws Exception {
  if (rmContext.isHAEnabled()) {
    autoFailoverEnabled = HAUtil.isAutomaticFailoverEnabled(conf);
    if (autoFailoverEnabled) {
      if (HAUtil.isAutomaticFailoverEmbedded(conf)) {
        embeddedElector = createEmbeddedElectorService();
        addIfService(embeddedElector);
      }
    }
  }

  masterServiceBindAddress = conf.getSocketAddr(
      YarnConfiguration.RM_BIND_HOST,
      YarnConfiguration.RM_ADMIN_ADDRESS,
      YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
      YarnConfiguration.DEFAULT_RM_ADMIN_PORT);
  daemonUser = UserGroupInformation.getCurrentUser();
  authorizer = YarnAuthorizationProvider.getInstance(conf);
  authorizer.setAdmins(getAdminAclList(conf), UserGroupInformation
      .getCurrentUser());
  rmId = conf.get(YarnConfiguration.RM_HA_ID);
  super.serviceInit(conf);
}
 
Author: naver | Project: hadoop | Lines: 25 | Source: AdminService.java

Example 2: addResourceFiles

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
private static void addResourceFiles(Configuration conf, String fileNames)
  throws MalformedURLException {
  String[] fileNameArray = fileNames.split(",");
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < fileNameArray.length; i++) {
    if (i != 0) {
      sb.append(",");
    }
    URL url = new File(fileNameArray[i]).toURI().toURL();
    sb.append(url.toString());
  }

  String addJars = conf.get(AngelConf.ANGEL_JOB_LIBJARS);

  if (addJars == null || addJars.trim().isEmpty()) {
    conf.set(AngelConf.ANGEL_JOB_LIBJARS, sb.toString());
  } else {
    conf.set(AngelConf.ANGEL_JOB_LIBJARS, sb.toString() + "," + addJars);
  }
}
 
Author: Tencent | Project: angel | Lines: 21 | Source: ConfUtils.java
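
As a side note, the StringBuilder loop above can be written more compactly on Java 8+ with streams. This is only an equivalent sketch under the same assumptions (comma-separated file names, no escaping), not the project's actual code:

import java.io.File;
import java.io.UncheckedIOException;
import java.net.MalformedURLException;
import java.util.Arrays;
import java.util.stream.Collectors;

// Convert each file name to a file:// URL and join with commas,
// producing the same string the loop builds.
static String toUrlList(String fileNames) {
  return Arrays.stream(fileNames.split(","))
      .map(name -> {
        try {
          return new File(name).toURI().toURL().toString();
        } catch (MalformedURLException e) {
          throw new UncheckedIOException(e); // checked exception can't escape the lambda
        }
      })
      .collect(Collectors.joining(","));
}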

Example 3: setKMSACLs

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
private void setKMSACLs(Configuration conf) {
  Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
  Map<Type, AccessControlList> tempBlacklist = new HashMap<Type, AccessControlList>();
  for (Type aclType : Type.values()) {
    String aclStr = conf.get(aclType.getAclConfigKey(), ACL_DEFAULT);
    tempAcls.put(aclType, new AccessControlList(aclStr));
    String blacklistStr = conf.get(aclType.getBlacklistConfigKey());
    if (blacklistStr != null) {
      // Only add if blacklist is present
      tempBlacklist.put(aclType, new AccessControlList(blacklistStr));
      LOG.info("'{}' Blacklist '{}'", aclType, blacklistStr);
    }
    LOG.info("'{}' ACL '{}'", aclType, aclStr);
  }
  acls = tempAcls;
  blacklistedAcls = tempBlacklist;
}
 
Author: nucypher | Project: hadoop-oss | Lines: 18 | Source: KMSACLs.java

Example 4: startInternal

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
@Override
protected synchronized void startInternal() throws Exception {
  // create filesystem only now, as part of service-start. By this time, RM is
  // authenticated with kerberos so we are good to create a file-system
  // handle.
  Configuration conf = new Configuration(getConfig());
  conf.setBoolean("dfs.client.retry.policy.enabled", true);
  String retryPolicy =
      conf.get(YarnConfiguration.FS_RM_STATE_STORE_RETRY_POLICY_SPEC,
        YarnConfiguration.DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC);
  conf.set("dfs.client.retry.policy.spec", retryPolicy);

  fs = fsWorkingPath.getFileSystem(conf);
  mkdirsWithRetries(rmDTSecretManagerRoot);
  mkdirsWithRetries(rmAppRoot);
  mkdirsWithRetries(amrmTokenSecretManagerRoot);
}
 
Author: naver | Project: hadoop | Lines: 18 | Source: FileSystemRMStateStore.java

Example 5: checkOutputSpecs

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context)
    throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  DBConfiguration dbConf = new DBConfiguration(conf);

  // Sanity check all the configuration values we need.
  if (null == conf.get(DBConfiguration.URL_PROPERTY)) {
    throw new IOException("Database connection URL is not set.");
  } else if (null == dbConf.getOutputTableName()) {
    throw new IOException("Table name is not set for export.");
  } else if (null == dbConf.getOutputFieldNames()) {
    throw new IOException(
        "Output field names are null.");
  } else if (null == conf.get(ExportJobBase.SQOOP_EXPORT_UPDATE_COL_KEY)) {
    throw new IOException("Update key column is not set for export.");
  }
}
 
Author: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 20 | Source: UpdateOutputFormat.java

Example 6: preserveFileAttributesForDirectories

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
private void preserveFileAttributesForDirectories(Configuration conf) throws IOException {
  String attrSymbols = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  final boolean syncOrOverwrite = syncFolder || overwrite;

  LOG.info("About to preserve attributes: " + attrSymbols);

  EnumSet<FileAttribute> attributes = DistCpUtils.unpackAttributes(attrSymbols);
  final boolean preserveRawXattrs =
      conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);

  Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
  FileSystem clusterFS = sourceListing.getFileSystem(conf);
  SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
                                    SequenceFile.Reader.file(sourceListing));
  long totalLen = clusterFS.getFileStatus(sourceListing).getLen();

  Path targetRoot = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  long preservedEntries = 0;
  try {
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();

    // Iterate over every source path that was copied.
    while (sourceReader.next(srcRelPath, srcFileStatus)) {
      // File-attributes for files are set at the time of copy,
      // in the map-task.
      if (! srcFileStatus.isDirectory()) continue;

      Path targetFile = new Path(targetRoot.toString() + "/" + srcRelPath);
      // Skip the root folder when syncOrOverwrite is true.
      if (targetRoot.equals(targetFile) && syncOrOverwrite) continue;

      FileSystem targetFS = targetFile.getFileSystem(conf);
      DistCpUtils.preserve(targetFS, targetFile, srcFileStatus, attributes,
          preserveRawXattrs);
      preservedEntries++; // count each directory whose attributes were preserved

      taskAttemptContext.progress();
      taskAttemptContext.setStatus("Preserving status on directory entries. [" +
          sourceReader.getPosition() * 100 / totalLen + "%]");
    }
  } finally {
    IOUtils.closeStream(sourceReader);
  }
  LOG.info("Preserved status on " + preservedEntries + " dir entries on target");
}
 
Author: naver | Project: hadoop | Lines: 49 | Source: CopyCommitter.java

Example 7: getLocalHostName

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
/**
 * Retrieve the name of the current host. Multihomed hosts may restrict the
 * hostname lookup to a specific interface and nameserver with {@link
 * org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_INTERFACE_KEY}
 * and {@link org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_NAMESERVER_KEY}
 *
 * @param conf Configuration object. May be null.
 * @return the canonical host name of the current host
 * @throws UnknownHostException if the host name cannot be resolved
 */
static String getLocalHostName(@Nullable Configuration conf)
    throws UnknownHostException {
  if (conf != null) {
    String dnsInterface = conf.get(HADOOP_SECURITY_DNS_INTERFACE_KEY);
    String nameServer = conf.get(HADOOP_SECURITY_DNS_NAMESERVER_KEY);

    if (dnsInterface != null) {
      return DNS.getDefaultHost(dnsInterface, nameServer, true);
    } else if (nameServer != null) {
      throw new IllegalArgumentException(HADOOP_SECURITY_DNS_NAMESERVER_KEY +
          " requires " + HADOOP_SECURITY_DNS_INTERFACE_KEY +
          ". Check your configuration.");
    }
  }

  // Fallback to querying the default hostname as we did before.
  return InetAddress.getLocalHost().getCanonicalHostName();
}
 
Author: nucypher | Project: hadoop-oss | Lines: 29 | Source: SecurityUtil.java

Example 8: init

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
@Override
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);

  LOG.debug("Initializing the GangliaContext31 for Ganglia 3.1 metrics.");

  // Take the hostname from the DNS class.

  Configuration conf = new Configuration();

  if (conf.get("slave.host.name") != null) {
    hostName = conf.get("slave.host.name");
  } else {
    try {
      hostName = DNS.getDefaultHost(
        conf.get("dfs.datanode.dns.interface","default"),
        conf.get("dfs.datanode.dns.nameserver","default"));
    } catch (UnknownHostException uhe) {
      LOG.error(uhe);
  	hostName = "UNKNOWN.example.com";
    }
  }
}
 
Author: nucypher | Project: hadoop-oss | Lines: 24 | Source: GangliaContext31.java

Example 9: TestPutRow

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
public TestPutRow() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Author: aliyun | Project: aliyun-tablestore-hbase-client | Lines: 15 | Source: TestPutRow.java

Example 10: getHPCLocalDirs

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
public static String[] getHPCLocalDirs(Configuration conf) {
  String getLocalDirs = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_LOCAL_DIRS, conf.get(
          YarnConfiguration.NM_LOCAL_DIRS,
          YarnConfiguration.DEFAULT_NM_LOCAL_DIRS));
  return StringUtils.getTrimmedStrings(getLocalDirs);
}
 
Author: intel-hpdd | Project: scheduling-connector-for-hadoop | Lines: 8 | Source: HPCConfiguration.java
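
Note the pattern in this example: the inner conf.get supplies a dynamic default for the outer one, so YARN_APPLICATION_HPC_LOCAL_DIRS falls back to the NodeManager's NM_LOCAL_DIRS, which in turn falls back to its own static default.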

Example 11: wrapKey

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
/**
 * Protect a key by encrypting it with the secret key of the given subject.
 * The configuration must be set up correctly for key alias resolution.
 * @param conf configuration
 * @param subject subject key alias
 * @param key the key
 * @return the encrypted key bytes
 */
public static byte[] wrapKey(Configuration conf, String subject, Key key)
    throws IOException {
  // Wrap the key with the configured encryption algorithm.
  String algorithm =
      conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  Cipher cipher = Encryption.getCipher(conf, algorithm);
  if (cipher == null) {
    throw new RuntimeException("Cipher '" + algorithm + "' not available");
  }
  EncryptionProtos.WrappedKey.Builder builder = EncryptionProtos.WrappedKey.newBuilder();
  builder.setAlgorithm(key.getAlgorithm());
  byte[] iv = null;
  if (cipher.getIvLength() > 0) {
    iv = new byte[cipher.getIvLength()];
    RNG.nextBytes(iv);
    builder.setIv(ByteStringer.wrap(iv));
  }
  byte[] keyBytes = key.getEncoded();
  builder.setLength(keyBytes.length);
  builder.setHash(ByteStringer.wrap(Encryption.hash128(keyBytes)));
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject,
    conf, cipher, iv);
  builder.setData(ByteStringer.wrap(out.toByteArray()));
  // Build and return the protobuf message
  out.reset();
  builder.build().writeDelimitedTo(out);
  return out.toByteArray();
}
 
Author: fengchen8086 | Project: ditb | Lines: 38 | Source: EncryptionUtil.java
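
A rough usage sketch for wrapKey follows. The key alias and the toy 16-byte key are assumptions for illustration; as the javadoc says, a real cluster must be configured for key alias resolution (e.g. via hbase.crypto.keyprovider) before the subject can be resolved:

import java.security.Key;
import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

Configuration conf = HBaseConfiguration.create();
// Toy 128-bit AES key; in practice the key would come from a key provider.
Key clusterKey = new SecretKeySpec(new byte[16], "AES");
byte[] wrapped = EncryptionUtil.wrapKey(conf, "master-key-alias", clusterKey);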

Example 12: setConf

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
@Override
synchronized public void setConf(Configuration conf) {
  this.conf = conf;
  this.randomDevPath = conf.get(
      HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY,
      HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT);
  close();
}
 
Author: nucypher | Project: hadoop-oss | Lines: 9 | Source: OsSecureRandom.java

Example 13: create

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
public static DataNodeMetrics create(Configuration conf, String dnName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);
  String name = "DataNodeActivity-"+ (dnName.isEmpty()
      ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt() 
          : dnName.replace(':', '-'));

  // Percentile measurement is off by default, by watching no intervals
  int[] intervals = 
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  
  return ms.register(name, null, new DataNodeMetrics(name, sessionId,
      intervals, jm));
}
 
Author: naver | Project: hadoop | Lines: 16 | Source: DataNodeMetrics.java

Example 14: getServiceURI

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
/**
 * Get the test URI
 * @param conf configuration
 * @return the test filesystem URI
 * @throws SwiftConfigurationException missing parameter or bad URI
 */
public static URI getServiceURI(Configuration conf) throws
                                                    SwiftConfigurationException {
  String instance = conf.get(TEST_FS_SWIFT);
  if (instance == null) {
    throw new SwiftConfigurationException(
      "Missing configuration entry " + TEST_FS_SWIFT);
  }
  try {
    return new URI(instance);
  } catch (URISyntaxException e) {
    throw new SwiftConfigurationException("Bad URI: " + instance);
  }
}
 
Author: naver | Project: hadoop | Lines: 19 | Source: SwiftTestUtils.java

Example 15: testDeprecatedKeys

import org.apache.hadoop.conf.Configuration; // import the required package/class for this method
public void testDeprecatedKeys() throws Exception {
  Configuration conf = new Configuration();
  conf.set("topology.script.file.name", "xyz");
  conf.set("topology.script.file.name", "xyz");
  String scriptFile = conf.get(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
  assertTrue(scriptFile.equals("xyz")) ;
}
 
Author: naver | Project: hadoop | Lines: 8 | Source: TestDeprecatedKeys.java


Note: the org.apache.hadoop.conf.Configuration.get examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution or use should follow each project's License. Please do not reproduce without permission.