

Java HdfsConfiguration.setBoolean Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.HdfsConfiguration.setBoolean. If you are wondering what HdfsConfiguration.setBoolean does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.HdfsConfiguration.


The 15 code examples of the HdfsConfiguration.setBoolean method shown below are sorted by popularity by default.
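Before the individual examples, here is a minimal sketch of the pattern they all share (it is not taken from any of the projects below, and the helper name makeConf is hypothetical): HdfsConfiguration extends org.apache.hadoop.conf.Configuration, so setBoolean(key, value) stores a boolean property under the given key, and getBoolean(key, defaultValue) reads it back.

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Minimal sketch: set a boolean flag and read it back.
// The helper name makeConf is hypothetical, for illustration only.
private static HdfsConfiguration makeConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  // Stores "true" under the WebHDFS key; later reads see the boolean value.
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  // getBoolean returns the stored value, or the supplied default if the key is unset.
  boolean enabled = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, false);
  assert enabled;
  return conf;
}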

Example 1: createSecureConfig

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
private Configuration createSecureConfig(String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
  conf.set(DFS_ENCRYPT_DATA_TRANSFER_KEY,
           "true");//https://issues.apache.org/jira/browse/HDFS-7431
  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 22, Source: TestWithSecureMiniDFSCluster.java

Example 2: initZeroCopyTest

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
public static HdfsConfiguration initZeroCopyTest() {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE, 3);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 100);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestRequestMmapAccess._PORT.sock").getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.
      DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, true);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  return conf;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestEnhancedByteBufferAccess.java

Example 3: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
@BeforeClass
public static void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER),
      "group1");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  configureSuperUserIPAddresses(config, REAL_USER);
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  ugi = UserGroupInformation.createRemoteUser(REAL_USER);
  proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
      GROUP_NAMES);
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestDelegationTokenForProxyUser.java

Example 4: createSecureConfig

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: SaslDataTransferTestCase.java

Example 5: setup

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestNameNodeRetryCacheMetrics.java

Example 6: testMonitoringOperationsWithAutoHaEnabled

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
/**
 * Test that, even if automatic HA is enabled, the monitoring operations
 * still function correctly.
 */
@Test
public void testMonitoringOperationsWithAutoHaEnabled() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();

  // Turn on auto-HA
  HdfsConfiguration conf = getHAConf();
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  tool.setConf(conf);

  assertEquals(0, runTool("-checkHealth", "nn1"));
  Mockito.verify(mockProtocol).monitorHealth();
  
  assertEquals(0, runTool("-getServiceState", "nn1"));
  Mockito.verify(mockProtocol).getServiceStatus();
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestDFSHAAdmin.java

Example 7: init

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
@BeforeClass
public static void init() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  initCluster(true);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestStickyBit.java

Example 8: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.set("hadoop.security.auth_to_local",
      "RULE:[2:[email protected]$0]([email protected]*FOO.COM)s/@.*//" + "DEFAULT");
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
  cluster.waitActive();
  dtSecretManager = NameNodeAdapter.getDtSecretManager(
      cluster.getNamesystem());
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: TestDelegationToken.java

Example 9: testNoSaslAndSecurePortsIgnored

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
@Test
public void testNoSaslAndSecurePortsIgnored() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig("");
  clusterConf.setBoolean(IGNORE_SECURE_PORTS_FOR_TESTING_KEY, true);
  startCluster(clusterConf);
  doTest(clusterConf);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestSaslDataTransfer.java

Example 10: init

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
@BeforeClass
public static void init() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 3);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, MAX_SIZE);
  initCluster(true);
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: FSXAttrBaseTest.java

Example 11: setupCluster

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
@Before
public void setupCluster() throws Exception {
  // must configure prior to instantiating the namesystem because it
  // will reconfigure the logger if async is enabled
  configureAuditLogs();
  conf = new HdfsConfiguration();
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
  util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
      setNumFiles(20).build();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = cluster.getFileSystem();
  util.createFiles(fs, fileName);

  // make sure the appender is what it's supposed to be
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  assertEquals(1, appenders.size());
  assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);

  fnames = util.getFileNames(fileName);
  util.waitReplication(fs, fileName, (short)3);
  userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestAuditLogs.java

Example 12: setup

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  nnRpc = cluster.getNameNode().getRpcServer();
  filesystem = cluster.getFileSystem();
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestNamenodeRetryCache.java

Example 13: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestDecommissioningStatus.java

Example 14: setUpNameDirs

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
@Before
public void setUpNameDirs() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  
  hdfsDir.mkdirs();
  path1 = new File(hdfsDir, "name1");
  path2 = new File(hdfsDir, "name2");
  path3 = new File(hdfsDir, "name3");
  
  path1.mkdir(); path2.mkdir(); path3.mkdir();
  if(!path2.exists() ||  !path3.exists() || !path1.exists()) {
    throw new IOException("Couldn't create dfs.name dirs in " + hdfsDir.getAbsolutePath());
  }
  
  String dfs_name_dir = new String(path1.getPath() + "," + path2.getPath());
  System.out.println("configuring hdfsdir is " + hdfsDir.getAbsolutePath() + 
      "; dfs_name_dir = "+ dfs_name_dir + ";dfs_name_edits_dir(only)=" + path3.getPath());
  
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dfs_name_dir);
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, dfs_name_dir + "," + path3.getPath());

  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
 
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
  
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  
  // set the restore feature on
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestStorageRestore.java

Example 15: getDefaultConf

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the package/class this method depends on
private static Configuration getDefaultConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 50);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 250);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      TestFsDatasetCache.CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
    new File(sockDir.getDir(), "sock").getAbsolutePath());
  return conf;
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestFsDatasetCacheRevocation.java


Note: The org.apache.hadoop.hdfs.HdfsConfiguration.setBoolean examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.