

Java Configuration.unset Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.unset. If you have been wondering what exactly Configuration.unset does, how to call it, or where to find it used in practice, the hand-picked code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


The following 15 code examples of Configuration.unset are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site surface better Java code examples.
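Before the examples, here is a minimal sketch of the set/unset/get contract, written for this article rather than taken from any project below. The property name "my.custom.key" is hypothetical; the key point is that after unset, a lookup falls back to the supplied default value, or null if none is given.

import org.apache.hadoop.conf.Configuration;

public class UnsetDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // "my.custom.key" is a hypothetical property used only for illustration.
    conf.set("my.custom.key", "override");
    System.out.println(conf.get("my.custom.key"));             // prints "override"

    // unset removes the explicitly set value...
    conf.unset("my.custom.key");
    // ...so lookups fall back to the default argument, or null without one.
    System.out.println(conf.get("my.custom.key", "fallback")); // prints "fallback"
    System.out.println(conf.get("my.custom.key"));             // prints "null"
  }
}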

Example 1: testGetAuthenticationMethod

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testGetAuthenticationMethod() {
  Configuration conf = new Configuration();
  // default is simple
  conf.unset(HADOOP_SECURITY_AUTHENTICATION);
  assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
  // simple
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
  assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
  // kerberos
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  assertEquals(KERBEROS, SecurityUtil.getAuthenticationMethod(conf));
  // bad value
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kaboom");
  String error = null;
  try {
    SecurityUtil.getAuthenticationMethod(conf);
  } catch (Exception e) {
    error = e.toString();
  }
  assertEquals("java.lang.IllegalArgumentException: " +
               "Invalid attribute value for " +
               HADOOP_SECURITY_AUTHENTICATION + " of kaboom", error);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 25, Source file: TestSecurityUtil.java

Example 2: testNamenodeRpcBindAny

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testNamenodeRpcBindAny() throws IOException {
  Configuration conf = new HdfsConfiguration();

  // The name node in MiniDFSCluster only binds to 127.0.0.1.
  // We can set the bind address to 0.0.0.0 to make it listen
  // to all interfaces.
  conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
        .getClientRpcServer().getListenerAddress().getHostName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    // Reset the config
    conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source file: TestNameNodeRpcServer.java

Example 3: setupRecoveryTestConf

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Create a test configuration that will exercise the initializeGenericKeys
 * code path.  This is a regression test for HDFS-4279.
 */
static void setupRecoveryTestConf(Configuration conf) throws IOException {
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
    "ns1"), "nn1,nn2");
  String baseDir = System.getProperty(
      MiniDFSCluster.PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
  File nameDir = new File(baseDir, "nameR");
  File secondaryDir = new File(baseDir, "namesecondaryR");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
      DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1"),
      nameDir.getCanonicalPath());
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
      DFS_NAMENODE_CHECKPOINT_DIR_KEY, "ns1", "nn1"),
      secondaryDir.getCanonicalPath());
  conf.unset(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.unset(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
  FileUtils.deleteQuietly(nameDir);
  if (!nameDir.mkdirs()) {
    throw new RuntimeException("failed to make directory " +
      nameDir.getAbsolutePath());
  }
  FileUtils.deleteQuietly(secondaryDir);
  if (!secondaryDir.mkdirs()) {
    throw new RuntimeException("failed to make directory " +
      secondaryDir.getAbsolutePath());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source file: TestNameNodeRecovery.java

Example 4: validHostnameVerifier

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void validHostnameVerifier() throws Exception {
  Configuration conf = createConfiguration(false, true);
  conf.unset(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY);
  SSLFactory sslFactory = new
    SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("DEFAULT", sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();

  conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
  sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("ALLOW_ALL",
                      sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();

  conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT_AND_LOCALHOST");
  sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("DEFAULT_AND_LOCALHOST",
                      sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();

  conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT");
  sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("STRICT", sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();

  conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT_IE6");
  sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  sslFactory.init();
  Assert.assertEquals("STRICT_IE6",
                      sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 38, Source file: TestSSLFactory.java

Example 5: testNoClientCertsInitialization

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testNoClientCertsInitialization() throws Exception {
  Configuration conf = createConfiguration(false, true);
  conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
  SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
  try {
    sslFactory.init();
  } finally {
    sslFactory.destroy();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 12, Source file: TestSSLFactory.java

Example 6: testNoTrustStore

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testNoTrustStore() throws Exception {
  Configuration conf = createConfiguration(false, false);
  conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
  SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
  try {
    sslFactory.init();
  } finally {
    sslFactory.destroy();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 12, Source file: TestSSLFactory.java

Example 7: createConf

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Create a configuration with a specific topology script
 * @param script a (never executed) script, can be null
 * @return a configuration
 */
private Configuration createConf(String script) {
  Configuration conf = new Configuration();
  if (script != null) {
    conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
             script);
  } else {
    conf.unset(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
  }
  return conf;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 16, Source file: TestStaticMapping.java

Example 8: getConfigurationWithoutSharedEdits

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Clone the supplied configuration but remove the shared edits dirs.
 *
 * @param conf Supplies the original configuration.
 * @return Cloned configuration without the shared edit dirs.
 * @throws IOException on failure to generate the configuration.
 */
private static Configuration getConfigurationWithoutSharedEdits(
    Configuration conf)
    throws IOException {
  List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
  String editsDirsString = Joiner.on(",").join(editsDirs);

  Configuration confWithoutShared = new Configuration(conf);
  confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      editsDirsString);
  return confWithoutShared;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source file: NameNode.java

Example 9: resetCacheConfig

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on
 * direct memory will likely cause the map tasks to OOM when opening the region. This
 * is done here instead of in TableSnapshotRegionRecordReader in case an advanced user
 * wants to override this behavior in their job.
 */
public static void resetCacheConfig(Configuration conf) {
  conf.setFloat(
    HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
  conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f);
  conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source file: TableMapReduceUtil.java
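
As a usage sketch (an assumption for illustration, not part of the example above): a job driver would typically call resetCacheConfig on its configuration before configuring and submitting the snapshot-based job, so that map tasks open regions with a plain on-heap block cache.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;

public class SnapshotJobSetup {
  public static void main(String[] args) {
    // Hypothetical driver snippet: disable the bucket cache and restore the
    // default on-heap cache size before the snapshot job is configured.
    Configuration conf = HBaseConfiguration.create();
    TableMapReduceUtil.resetCacheConfig(conf);
    // ... configure and submit the TableSnapshotInputFormat-based job here ...
  }
}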

Example 10: excludeIncompatibleCredentialProviders

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * There are certain integrations of the credential provider API in
 * which a recursive dependency between the provider and the hadoop
 * filesystem abstraction causes a problem. These integration points
 * need to leverage this utility method to remove problematic provider
 * types from the existing provider path within the configuration.
 *
 * @param config the existing configuration with provider path
 * @param fileSystemClass the filesystem class with which providers must be compatible
 * @return Configuration clone with new provider path
 */
public static Configuration excludeIncompatibleCredentialProviders(
    Configuration config, Class<? extends FileSystem> fileSystemClass)
        throws IOException {

  String providerPath = config.get(
      CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH);

  if (providerPath == null) {
    return config;
  }
  StringBuffer newProviderPath = new StringBuffer();
  String[] providers = providerPath.split(",");
  Path path = null;
  for (String provider: providers) {
    try {
      path = unnestUri(new URI(provider));
      Class<? extends FileSystem> clazz = null;
      try {
        String scheme = path.toUri().getScheme();
        clazz = FileSystem.getFileSystemClass(scheme, config);
      } catch (IOException ioe) {
        // not all providers are filesystem based
        // for instance user:/// will not be able to
        // have a filesystem class associated with it.
        if (newProviderPath.length() > 0) {
          newProviderPath.append(",");
        }
        newProviderPath.append(provider);
      }
      if (clazz != null) {
        if (fileSystemClass.isAssignableFrom(clazz)) {
          LOG.debug("Filesystem based provider" +
              " excluded from provider path due to recursive dependency: "
              + provider);
        } else {
          if (newProviderPath.length() > 0) {
            newProviderPath.append(",");
          }
          newProviderPath.append(provider);
        }
      }
    } catch (URISyntaxException e) {
      LOG.warn("Credential Provider URI is invalid: " + provider);
    }
  }

  String effectivePath = newProviderPath.toString();
  if (effectivePath.equals(providerPath)) {
    return config;
  }

  Configuration conf = new Configuration(config);
  if (effectivePath.equals("")) {
    conf.unset(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH);
  } else {
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        effectivePath);
  }
  return conf;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 72, Source file: ProviderUtils.java
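
A hedged usage sketch of the utility above: a filesystem integration that must not load credential providers hosted on itself could clean the provider path as follows. The choice of DistributedFileSystem.class is illustrative only; real call sites live inside the individual filesystem implementations.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.security.ProviderUtils;

public class ProviderPathCleanup {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Drop providers that would recursively depend on HDFS itself
    // (DistributedFileSystem.class is an illustrative choice).
    Configuration cleaned = ProviderUtils.excludeIncompatibleCredentialProviders(
        conf, DistributedFileSystem.class);
    System.out.println(
        cleaned.get("hadoop.security.credential.provider.path"));
  }
}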

Example 11: getFileSystem

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private static FileSystem getFileSystem(Configuration conf) throws IOException {
  Configuration localConf = new Configuration(conf);
  localConf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, false);
  localConf.unset(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  return FileSystem.get(localConf);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 7, Source file: TestEncryptedTransfer.java

Example 12: testFailWhenNoSharedEditsSpecified

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testFailWhenNoSharedEditsSpecified() throws Exception {
  Configuration confNoShared = new Configuration(conf);
  confNoShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  assertFalse(NameNode.initializeSharedEdits(confNoShared, true));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 7, Source file: TestInitializeSharedEdits.java

Example 13: testPreserveStatus

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testPreserveStatus() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();


  String sourceBase;
  String targetBase;
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    FsPermission sourcePerm = new FsPermission((short) 511);
    FsPermission initialPerm = new FsPermission((short) 448);
    sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
    targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);

    DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
        new Path("/out"));
    options.preserve(FileAttribute.PERMISSION);
    options.appendToConf(conf);
    options.setTargetPathExists(false);
    
    CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
    Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
    listing.buildListing(listingFile, options);

    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);

    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permissions don't match");
    }

    // Test for an idempotent commit
    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permissions don't match");
    }

  } catch (IOException e) {
    LOG.error("Exception encountered while testing for preserve status", e);
    Assert.fail("Preserve status failure");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp1");
    conf.unset(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  }

}
 
Developer ID: naver, Project: hadoop, Lines of code: 52, Source file: TestCopyCommitter.java

Example 14: testJavaHeapOptions

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Test the specified task java heap options.
 */
@SuppressWarnings("deprecation")
private void testJavaHeapOptions(String mapOptions, 
    String reduceOptions, String taskOptions, String defaultMapOptions, 
    String defaultReduceOptions, String defaultTaskOptions, 
    String expectedMapOptions, String expectedReduceOptions, 
    String expectedTaskOptions) throws Exception {
  Configuration simulatedConf = new Configuration();
  // reset the configuration parameters
  simulatedConf.unset(MRJobConfig.MAP_JAVA_OPTS);
  simulatedConf.unset(MRJobConfig.REDUCE_JAVA_OPTS);
  simulatedConf.unset(JobConf.MAPRED_TASK_JAVA_OPTS);
  
  // set the default map task options
  if (defaultMapOptions != null) {
    simulatedConf.set(MRJobConfig.MAP_JAVA_OPTS, defaultMapOptions);
  }
  // set the default reduce task options
  if (defaultReduceOptions != null) {
    simulatedConf.set(MRJobConfig.REDUCE_JAVA_OPTS, defaultReduceOptions);
  }
  // set the default task options
  if (defaultTaskOptions != null) {
    simulatedConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, defaultTaskOptions);
  }
  
  Configuration originalConf = new Configuration();
  // reset the configuration parameters
  originalConf.unset(MRJobConfig.MAP_JAVA_OPTS);
  originalConf.unset(MRJobConfig.REDUCE_JAVA_OPTS);
  originalConf.unset(JobConf.MAPRED_TASK_JAVA_OPTS);
  
  // set the map task options
  if (mapOptions != null) {
    originalConf.set(MRJobConfig.MAP_JAVA_OPTS, mapOptions);
  }
  // set the reduce task options
  if (reduceOptions != null) {
    originalConf.set(MRJobConfig.REDUCE_JAVA_OPTS, reduceOptions);
  }
  // set the task options
  if (taskOptions != null) {
    originalConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, taskOptions);
  }
  
  // configure the task jvm's heap options
  GridmixJob.configureTaskJVMOptions(originalConf, simulatedConf);
  
  assertEquals("Map heap options mismatch!", expectedMapOptions, 
               simulatedConf.get(MRJobConfig.MAP_JAVA_OPTS));
  assertEquals("Reduce heap options mismatch!", expectedReduceOptions, 
               simulatedConf.get(MRJobConfig.REDUCE_JAVA_OPTS));
  assertEquals("Task heap options mismatch!", expectedTaskOptions, 
               simulatedConf.get(JobConf.MAPRED_TASK_JAVA_OPTS));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 58, Source file: TestGridmixMemoryEmulation.java

Example 15: cleanUpTokenReferral

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Remove jobtoken referrals which don't make sense in the context
 * of the task execution.
 *
 * @param conf
 */
public static void cleanUpTokenReferral(Configuration conf) {
  conf.unset(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 10, Source file: TokenCache.java


Note: The org.apache.hadoop.conf.Configuration.unset examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by the community; copyright of the source code remains with the original authors. Consult each project's license before using or redistributing the code; do not republish without permission.