

Java Configuration.unset Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.unset. If you are wondering how exactly Configuration.unset is used, what it does, or where to find examples of it, the hand-picked code examples below may help. You can also explore further usage examples of org.apache.hadoop.conf.Configuration, the class this method belongs to.


The following presents 15 code examples of Configuration.unset, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
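Before diving into the examples, here is a minimal sketch of the basic set/unset/get round trip. The key name my.custom.key is made up for illustration; after unset, get returns null because no loaded resource defines a default for that key:

import org.apache.hadoop.conf.Configuration;

public class UnsetDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("my.custom.key", "value");
    System.out.println(conf.get("my.custom.key")); // prints "value"
    conf.unset("my.custom.key");                   // removes the entry
    System.out.println(conf.get("my.custom.key")); // prints "null"
  }
}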

Example 1: testGetAuthenticationMethod

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testGetAuthenticationMethod() {
  Configuration conf = new Configuration();
  // default is simple
  conf.unset(HADOOP_SECURITY_AUTHENTICATION);
  assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
  // simple
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
  assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
  // kerberos
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  assertEquals(KERBEROS, SecurityUtil.getAuthenticationMethod(conf));
  // bad value
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kaboom");
  String error = null;
  try {
    SecurityUtil.getAuthenticationMethod(conf);
  } catch (Exception e) {
    error = e.toString();
  }
  assertEquals("java.lang.IllegalArgumentException: " +
               "Invalid attribute value for " +
               HADOOP_SECURITY_AUTHENTICATION + " of kaboom", error);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 25, Source: TestSecurityUtil.java

Example 2: testNamenodeRpcBindAny

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testNamenodeRpcBindAny() throws IOException {
  Configuration conf = new HdfsConfiguration();

  // The name node in MiniDFSCluster only binds to 127.0.0.1.
  // We can set the bind address to 0.0.0.0 to make it listen
  // to all interfaces.
  conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
        .getClientRpcServer().getListenerAddress().getHostName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    // Reset the config
    conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: TestNameNodeRpcServer.java

Example 3: setupRecoveryTestConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Create a test configuration that will exercise the initializeGenericKeys
 * code path.  This is a regression test for HDFS-4279.
 */
static void setupRecoveryTestConf(Configuration conf) throws IOException {
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
    "ns1"), "nn1,nn2");
  String baseDir = System.getProperty(
      MiniDFSCluster.PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
  File nameDir = new File(baseDir, "nameR");
  File secondaryDir = new File(baseDir, "namesecondaryR");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
      DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1"),
      nameDir.getCanonicalPath());
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
      DFS_NAMENODE_CHECKPOINT_DIR_KEY, "ns1", "nn1"),
      secondaryDir.getCanonicalPath());
  conf.unset(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.unset(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
  FileUtils.deleteQuietly(nameDir);
  if (!nameDir.mkdirs()) {
    throw new RuntimeException("failed to make directory " +
      nameDir.getAbsolutePath());
  }
  FileUtils.deleteQuietly(secondaryDir);
  if (!secondaryDir.mkdirs()) {
    throw new RuntimeException("failed to make directory " +
      secondaryDir.getAbsolutePath());
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 33, Source: TestNameNodeRecovery.java

Example 4: validHostnameVerifier

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void validHostnameVerifier() throws Exception {
  Configuration conf = createConfiguration(false, true);
  conf.unset(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY);
  SSLFactory sslFactory = new
    SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("DEFAULT", sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();

  conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
  sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("ALLOW_ALL",
                      sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();

  conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT_AND_LOCALHOST");
  sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("DEFAULT_AND_LOCALHOST",
                      sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();

  conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT");
  sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("STRICT", sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();

  conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT_IE6");
  sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("STRICT_IE6",
                      sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 38, Source: TestSSLFactory.java

Example 5: testNoClientCertsInitialization

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testNoClientCertsInitialization() throws Exception {
  Configuration conf = createConfiguration(false, true);
  conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
  SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  try {
    sslFactory.init();
  } finally {
    sslFactory.destroy();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 12, Source: TestSSLFactory.java

Example 6: testNoTrustStore

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testNoTrustStore() throws Exception {
  Configuration conf = createConfiguration(false, false);
  conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
  SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
  try {
    sslFactory.init();
  } finally {
    sslFactory.destroy();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 12, Source: TestSSLFactory.java

Example 7: createConf

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Create a configuration with a specific topology script
 * @param script a (never executed) script, can be null
 * @return a configuration
 */
private Configuration createConf(String script) {
  Configuration conf = new Configuration();
  if (script != null) {
    conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
             script);
  } else {
    conf.unset(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
  }
  return conf;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 16, Source: TestStaticMapping.java

Example 8: getConfigurationWithoutSharedEdits

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Clone the supplied configuration but remove the shared edits dirs.
 *
 * @param conf Supplies the original configuration.
 * @return Cloned configuration without the shared edit dirs.
 * @throws IOException on failure to generate the configuration.
 */
private static Configuration getConfigurationWithoutSharedEdits(
    Configuration conf)
    throws IOException {
  List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
  String editsDirsString = Joiner.on(",").join(editsDirs);

  Configuration confWithoutShared = new Configuration(conf);
  confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      editsDirsString);
  return confWithoutShared;
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: NameNode.java

Example 9: resetCacheConfig

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on
 * direct memory will likely cause the map tasks to OOM when opening the region. This
 * is done here instead of in TableSnapshotRegionRecordReader in case an advanced user
 * wants to override this behavior in their job.
 */
public static void resetCacheConfig(Configuration conf) {
  conf.setFloat(
    HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
  conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f);
  conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 13, Source: TableMapReduceUtil.java
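A hedged usage sketch, not part of the original example: a snapshot-based MapReduce job setup might call this utility on its configuration before submission, so that map tasks opening regions locally fall back to a plain on-heap block cache:

Configuration conf = HBaseConfiguration.create();
// Force a basic on-heap block cache; direct-memory caches can OOM map tasks
// that open regions directly from snapshot files.
TableMapReduceUtil.resetCacheConfig(conf);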

Example 10: excludeIncompatibleCredentialProviders

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * There are certain integrations of the credential provider API in
 * which a recursive dependency between the provider and the hadoop
 * filesystem abstraction causes a problem. These integration points
 * need to leverage this utility method to remove problematic provider
 * types from the existing provider path within the configuration.
 *
 * @param config the existing configuration with provider path
 * @param fileSystemClass the class with which providers must be compatible
 * @return Configuration clone with new provider path
 */
public static Configuration excludeIncompatibleCredentialProviders(
    Configuration config, Class<? extends FileSystem> fileSystemClass)
        throws IOException {

  String providerPath = config.get(
      CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH);

  if (providerPath == null) {
    return config;
  }
  StringBuffer newProviderPath = new StringBuffer();
  String[] providers = providerPath.split(",");
  Path path = null;
  for (String provider: providers) {
    try {
      path = unnestUri(new URI(provider));
      Class<? extends FileSystem> clazz = null;
      try {
        String scheme = path.toUri().getScheme();
        clazz = FileSystem.getFileSystemClass(scheme, config);
      } catch (IOException ioe) {
        // not all providers are filesystem based
        // for instance user:/// will not be able to
        // have a filesystem class associated with it.
        if (newProviderPath.length() > 0) {
          newProviderPath.append(",");
        }
        newProviderPath.append(provider);
      }
      if (clazz != null) {
        if (fileSystemClass.isAssignableFrom(clazz)) {
          LOG.debug("Filesystem based provider" +
              " excluded from provider path due to recursive dependency: "
              + provider);
        } else {
          if (newProviderPath.length() > 0) {
            newProviderPath.append(",");
          }
          newProviderPath.append(provider);
        }
      }
    } catch (URISyntaxException e) {
      LOG.warn("Credential Provider URI is invalid." + provider);
    }
  }

  String effectivePath = newProviderPath.toString();
  if (effectivePath.equals(providerPath)) {
    return config;
  }

  Configuration conf = new Configuration(config);
  if (effectivePath.equals("")) {
    conf.unset(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH);
  } else {
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        effectivePath);
  }
  return conf;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 72, Source: ProviderUtils.java
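A hedged usage sketch, not part of the original example: a filesystem implementation can pass its own class to this utility so that self-referential providers are dropped from the path. The provider URIs below are illustrative, and the call assumes the method lives in org.apache.hadoop.security.ProviderUtils, the source file named above:

Configuration rawConf = new Configuration();
rawConf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
    "jceks://hdfs@nn.example.com/secret/creds.jceks,user:///");
// The jceks-over-HDFS provider resolves to DistributedFileSystem and is excluded;
// user:/// has no filesystem class, so it is kept in the cloned configuration.
Configuration safeConf = ProviderUtils.excludeIncompatibleCredentialProviders(
    rawConf, DistributedFileSystem.class);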

Example 11: getFileSystem

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
private static FileSystem getFileSystem(Configuration conf) throws IOException {
  Configuration localConf = new Configuration(conf);
  localConf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, false);
  localConf.unset(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  return FileSystem.get(localConf);
}
 
Developer ID: naver, Project: hadoop, Lines: 7, Source: TestEncryptedTransfer.java

Example 12: testFailWhenNoSharedEditsSpecified

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testFailWhenNoSharedEditsSpecified() throws Exception {
  Configuration confNoShared = new Configuration(conf);
  confNoShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  assertFalse(NameNode.initializeSharedEdits(confNoShared, true));
}
 
Developer ID: naver, Project: hadoop, Lines: 7, Source: TestInitializeSharedEdits.java

Example 13: testPreserveStatus

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testPreserveStatus() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();


  String sourceBase;
  String targetBase;
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    FsPermission sourcePerm = new FsPermission((short) 511);
    FsPermission initialPerm = new FsPermission((short) 448);
    sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
    targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);

    DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
        new Path("/out"));
    options.preserve(FileAttribute.PERMISSION);
    options.appendToConf(conf);
    options.setTargetPathExists(false);
    
    CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
    Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
    listing.buildListing(listingFile, options);

    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);

    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permission don't match");
    }

    //Test for idempotent commit
    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permission don't match");
    }

  } catch (IOException e) {
    LOG.error("Exception encountered while testing for preserve status", e);
    Assert.fail("Preserve status failure");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp1");
    conf.unset(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  }

}
 
Developer ID: naver, Project: hadoop, Lines: 52, Source: TestCopyCommitter.java

Example 14: testJavaHeapOptions

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Test the specified task java heap options.
 */
@SuppressWarnings("deprecation")
private void testJavaHeapOptions(String mapOptions, 
    String reduceOptions, String taskOptions, String defaultMapOptions, 
    String defaultReduceOptions, String defaultTaskOptions, 
    String expectedMapOptions, String expectedReduceOptions, 
    String expectedTaskOptions) throws Exception {
  Configuration simulatedConf = new Configuration();
  // reset the configuration parameters
  simulatedConf.unset(MRJobConfig.MAP_JAVA_OPTS);
  simulatedConf.unset(MRJobConfig.REDUCE_JAVA_OPTS);
  simulatedConf.unset(JobConf.MAPRED_TASK_JAVA_OPTS);
  
  // set the default map task options
  if (defaultMapOptions != null) {
    simulatedConf.set(MRJobConfig.MAP_JAVA_OPTS, defaultMapOptions);
  }
  // set the default reduce task options
  if (defaultReduceOptions != null) {
    simulatedConf.set(MRJobConfig.REDUCE_JAVA_OPTS, defaultReduceOptions);
  }
  // set the default task options
  if (defaultTaskOptions != null) {
    simulatedConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, defaultTaskOptions);
  }
  
  Configuration originalConf = new Configuration();
  // reset the configuration parameters
  originalConf.unset(MRJobConfig.MAP_JAVA_OPTS);
  originalConf.unset(MRJobConfig.REDUCE_JAVA_OPTS);
  originalConf.unset(JobConf.MAPRED_TASK_JAVA_OPTS);
  
  // set the map task options
  if (mapOptions != null) {
    originalConf.set(MRJobConfig.MAP_JAVA_OPTS, mapOptions);
  }
  // set the reduce task options
  if (reduceOptions != null) {
    originalConf.set(MRJobConfig.REDUCE_JAVA_OPTS, reduceOptions);
  }
  // set the task options
  if (taskOptions != null) {
    originalConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, taskOptions);
  }
  
  // configure the task jvm's heap options
  GridmixJob.configureTaskJVMOptions(originalConf, simulatedConf);
  
  assertEquals("Map heap options mismatch!", expectedMapOptions, 
               simulatedConf.get(MRJobConfig.MAP_JAVA_OPTS));
  assertEquals("Reduce heap options mismatch!", expectedReduceOptions, 
               simulatedConf.get(MRJobConfig.REDUCE_JAVA_OPTS));
  assertEquals("Task heap options mismatch!", expectedTaskOptions, 
               simulatedConf.get(JobConf.MAPRED_TASK_JAVA_OPTS));
}
 
Developer ID: naver, Project: hadoop, Lines: 58, Source: TestGridmixMemoryEmulation.java

Example 15: cleanUpTokenReferral

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Remove jobtoken referrals which don't make sense in the context
 * of the task execution.
 *
 * @param conf
 */
public static void cleanUpTokenReferral(Configuration conf) {
  conf.unset(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: TokenCache.java


Note: The org.apache.hadoop.conf.Configuration.unset examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's License before distributing or using the code; do not repost without permission.