

Java HdfsConfiguration.setInt Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.HdfsConfiguration.setInt. If you are wondering what this method does, how to call it, or what it looks like in real code, the selected examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.HdfsConfiguration.


Below are 15 code examples of the HdfsConfiguration.setInt method, sorted by popularity by default.
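Before turning to the collected examples, the following minimal sketch (not drawn from any of the projects below; the class name SetIntExample is hypothetical) illustrates the basic pattern: HdfsConfiguration inherits setInt(String, int) from org.apache.hadoop.conf.Configuration, which stores an integer value under a configuration key that cluster components later read back. It assumes the Hadoop HDFS jars are on the classpath.

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class SetIntExample { // hypothetical class name, for illustration only
  public static void main(String[] args) {
    HdfsConfiguration conf = new HdfsConfiguration();
    // Store an int value under a configuration key; here the DataNode
    // heartbeat interval is shortened to 1 second, as many of the tests below do.
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    // Read the value back; the second argument is the default used when the key is unset.
    int heartbeat = conf.getInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3);
    System.out.println("dfs.heartbeat.interval = " + heartbeat);
  }
}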

Example 1: createSecureConfig

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
private Configuration createSecureConfig(String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
  // see https://issues.apache.org/jira/browse/HDFS-7431
  conf.set(DFS_ENCRYPT_DATA_TRANSFER_KEY, "true");
  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
 
Developer: jiangxiluning; Project: kafka-connect-hdfs; Lines of code: 22; Source file: TestWithSecureMiniDFSCluster.java

Example 2: initZeroCopyTest

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
public static HdfsConfiguration initZeroCopyTest() {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE, 3);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 100);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestRequestMmapAccess._PORT.sock").getAbsolutePath());
  conf.setBoolean(
      DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, true);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  return conf;
}
 
Developer: naver; Project: hadoop; Lines of code: 19; Source file: TestEnhancedByteBufferAccess.java

Example 3: createSecureConfig

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
 
Developer: naver; Project: hadoop; Lines of code: 29; Source file: SaslDataTransferTestCase.java

Example 4: startUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@BeforeClass
public static void startUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf)
      .format(true)
      .numDataNodes(DATANODE_NUM)
      .nameNodePort(NameNode.DEFAULT_PORT)
      .waitSafeMode(true)
      .build();
  fs = cluster.getFileSystem();
}
 
Developer: naver; Project: hadoop; Lines of code: 17; Source file: TestFileTruncate.java

Example 5: setup

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Developer: naver; Project: hadoop; Lines of code: 17; Source file: TestNameNodeRetryCacheMetrics.java

Example 6: initCluster

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
 
Developer: naver; Project: hadoop; Lines of code: 31; Source file: TestDataNodeVolumeFailureReporting.java

Example 7: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
}
 
Developer: naver; Project: hadoop; Lines of code: 19; Source file: TestDataNodeVolumeFailureToleration.java

Example 8: createCluster

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
public static void createCluster() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.addResource(CONTRACT_HDFS_XML);
  //hack in a 256 byte block size
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitClusterUp();
}
 
Developer: naver; Project: hadoop; Lines of code: 11; Source file: HDFSContract.java

Example 9: testServerSaslNoClientSasl

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@Test
public void testServerSaslNoClientSasl() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
    "authentication,integrity,privacy");
  // Set short retry timeouts so this test runs faster
  clusterConf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataNode.class));
  try {
    doTest(clientConf);
    Assert.fail("Should fail if SASL data transfer protection is not " +
        "configured or not supported in client");
  } catch (IOException e) {
    GenericTestUtils.assertMatches(e.getMessage(), 
        "could only be replicated to 0 nodes");
  } finally {
    logs.stopCapturing();
  }

  GenericTestUtils.assertMatches(logs.getOutput(),
      "Failed to read expected SASL data transfer protection " +
      "handshake from client at");
}
 
Developer: naver; Project: hadoop; Lines of code: 28; Source file: TestSaslDataTransfer.java

Example 10: createConfiguration

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@Before
public void createConfiguration() {
  conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
}
 
Developer: naver; Project: hadoop; Lines of code: 9; Source file: TestDFSUpgradeWithHA.java

Example 11: init

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@BeforeClass
public static void init() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 3);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, MAX_SIZE);
  initCluster(true);
}
 
Developer: naver; Project: hadoop; Lines of code: 10; Source file: FSXAttrBaseTest.java

Example 12: createCachingConf

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
private static HdfsConfiguration createCachingConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, CACHE_CAPACITY);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  // set low limits here for testing purposes
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      2);

  return conf;
}
 
Developer: naver; Project: hadoop; Lines of code: 15; Source file: TestCacheDirectives.java

Example 13: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
 
Developer: naver; Project: hadoop; Lines of code: 35; Source file: TestDecommissioningStatus.java

Example 14: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@Before
public void setUp() throws Exception {
  // bring up a cluster of 2
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dn_num).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = new File(cluster.getDataDirectory());
}
 
Developer: naver; Project: hadoop; Lines of code: 13; Source file: TestDataNodeVolumeFailure.java

Example 15: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 100);
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
    .build();
  for (int i = 0; i < 3; i++) {
    cluster.waitActive(i);
  }
}
 
Developer: naver; Project: hadoop; Lines of code: 13; Source file: TestDataNodeExit.java


Note: The org.apache.hadoop.hdfs.HdfsConfiguration.setInt examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.