

Java HdfsConfiguration.set Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.HdfsConfiguration.set. If you are wondering what HdfsConfiguration.set does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.HdfsConfiguration.


The sections below present 15 code examples of the HdfsConfiguration.set method, sorted by popularity by default.
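
Before the examples, a minimal sketch of the method itself may help. HdfsConfiguration extends org.apache.hadoop.conf.Configuration and registers hdfs-default.xml and hdfs-site.xml as resources, so set(name, value) stores a string property that overrides those files for whoever reads the configuration afterwards. The snippet below is an illustration, not one of the collected examples; it assumes hadoop-hdfs is on the classpath, and the replication key and value are arbitrary choices.

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class HdfsConfigurationSetSketch {
  public static void main(String[] args) {
    HdfsConfiguration conf = new HdfsConfiguration();
    // set(String, String) is inherited from Configuration: it records the
    // property in memory, taking precedence over the loaded XML resources.
    conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "2");
    // Reading the value back confirms the override took effect.
    System.out.println(conf.get(DFSConfigKeys.DFS_REPLICATION_KEY)); // prints "2"
  }
}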

Example 1: TestGetConfIncludeCommand

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void TestGetConfIncludeCommand() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
  Path hostsFile = new Path(dir, "hosts");
  Path excludeFile = new Path(dir, "exclude");
  
  // Setup conf
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);    
  String[] args = {"-includeFile"};
  String ret = runTool(conf, args, true);
  assertEquals(hostsFile.toUri().getPath(), ret.trim());
  cleanupFile(localFileSys, excludeFile.getParent());
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestGetConf.java

Example 2: initZeroCopyTest

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
public static HdfsConfiguration initZeroCopyTest() {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE, 3);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 100);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestRequestMmapAccess._PORT.sock").getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.
      DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, true);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  return conf;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestEnhancedByteBufferAccess.java
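
A note on the domain socket path above: hdfs-default.xml documents that the literal string _PORT in dfs.domain.socket.path is replaced by the DataNode's TCP port at runtime, which is why a test that binds ephemeral ports can still share a single socket-path template.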

Example 3: TestGetConfExcludeCommand

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void TestGetConfExcludeCommand() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
  Path hostsFile = new Path(dir, "hosts");
  Path excludeFile = new Path(dir, "exclude");
  
  // Setup conf
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);    
  String[] args = {"-excludeFile"};
  String ret = runTool(conf, args, true);
  assertEquals(excludeFile.toUri().getPath(), ret.trim());
  cleanupFile(localFileSys, excludeFile.getParent());
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestGetConf.java

Example 4: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory());

  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      new File(hdfsDir, "data").getPath());
  config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "secondary")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      WILDCARD_HTTP_HOST + "0");
  
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestStartup.java
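
The repeated "0.0.0.0:0" addresses above are deliberate: binding to port 0 lets the operating system pick any free ephemeral port, so parallel test runs do not collide on fixed ports.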

Example 5: testFencingConfigPerNameNode

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
/**
 * Test that the fencing configuration can be overridden per-nameservice
 * or per-namenode
 */
@Test
public void testFencingConfigPerNameNode() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();

  final String nsSpecificKey = DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID;
  final String nnSpecificKey = nsSpecificKey + ".nn1";
  
  HdfsConfiguration conf = getHAConf();
  // Set the default fencer to succeed
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
  
  // Set the NN-specific fencer to fail. Should fail to fence.
  conf.set(nnSpecificKey, getFencerFalseCommand());
  tool.setConf(conf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
  conf.unset(nnSpecificKey);

  // Set an NS-specific fencer to fail. Should fail.
  conf.set(nsSpecificKey, getFencerFalseCommand());
  tool.setConf(conf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
  
  // Set the NS-specific fencer to succeed. Should succeed
  conf.set(nsSpecificKey, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestDFSHAAdmin.java
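
As the assertions show, the most specific fencing key wins: a namenode-specific key (dfs.ha.fencing.methods.<nameservice>.<namenode>) overrides a nameservice-specific one, which in turn overrides the bare dfs.ha.fencing.methods default.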

Example 6: testIncludeInternalNameServices

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void testIncludeInternalNameServices() throws Exception {
  final int nsCount = 10;
  final int remoteNsCount = 4;
  HdfsConfiguration conf = new HdfsConfiguration();
  setupNameServices(conf, nsCount);
  setupAddress(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsCount, 1000);
  setupAddress(conf, DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1500);
  conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
  setupStaticHostResolution(nsCount);

  String[] includedNN = new String[] {"nn1:1001"};
  verifyAddresses(conf, TestType.NAMENODE, false, includedNN);
  verifyAddresses(conf, TestType.NNRPCADDRESSES, true, includedNN);
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: TestGetConf.java

Example 7: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.set("hadoop.security.auth_to_local",
      "RULE:[2:[email protected]$0]([email protected]*FOO.COM)s/@.*//" + "DEFAULT");
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
  cluster.waitActive();
  dtSecretManager = NameNodeAdapter.getDtSecretManager(
      cluster.getNamesystem());
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: TestDelegationToken.java

Example 8: testFailoverWithFencerConfiguredAndForce

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void testFailoverWithFencerConfiguredAndForce() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: TestDFSHAAdmin.java

Example 9: getHAConf

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
private HdfsConfiguration getHAConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);    
  conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2");    
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  conf.set(DFSUtil.addKeySuffixes(
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn1"),
      HOST_A + ":12345");
  conf.set(DFSUtil.addKeySuffixes(
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn2"),
      HOST_B + ":12345");
  return conf;
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: TestDFSHAAdmin.java
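
For reference, DFSUtil.addKeySuffixes joins the base key and each suffix with dots. Assuming the test's NSID constant is "ns1" (an illustrative value, not confirmed by the excerpt), the calls above expand to concrete keys like these:

// Illustrative expansion of the addKeySuffixes calls; "ns1" is an assumed NSID.
DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, "ns1")
    // -> "dfs.ha.namenodes.ns1"
DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1")
    // -> "dfs.namenode.rpc-address.ns1.nn1"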

Example 10: testForceFenceOptionListedBeforeArgs

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void testForceFenceOptionListedBeforeArgs() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: TestDFSHAAdmin.java

Example 11: testPrivacy

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void testPrivacy() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
    "authentication,integrity,privacy");
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "privacy");
  doTest(clientConf);
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: TestSaslDataTransfer.java
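
dfs.data.transfer.protection (DFS_DATA_TRANSFER_PROTECTION_KEY) takes a comma-separated subset of authentication, integrity, and privacy. Here the cluster offers all three levels and the client requests privacy, so SASL negotiation finds a common level; example 12 below shows the failure when the two sides share none.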

Example 12: testClientAndServerDoNotHaveCommonQop

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void testClientAndServerDoNotHaveCommonQop() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig("privacy");
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
  exception.expect(IOException.class);
  exception.expectMessage("could only be replicated to 0 nodes");
  doTest(clientConf);
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: TestSaslDataTransfer.java

Example 13: testServerSaslNoClientSasl

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void testServerSaslNoClientSasl() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
    "authentication,integrity,privacy");
  // Set short retry timeouts so this test runs faster
  clusterConf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataNode.class));
  try {
    doTest(clientConf);
    Assert.fail("Should fail if SASL data transfer protection is not " +
        "configured or not supported in client");
  } catch (IOException e) {
    GenericTestUtils.assertMatches(e.getMessage(), 
        "could only be replicated to 0 nodes");
  } finally {
    logs.stopCapturing();
  }

  GenericTestUtils.assertMatches(logs.getOutput(),
      "Failed to read expected SASL data transfer protection " +
      "handshake from client at");
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TestSaslDataTransfer.java

Example 14: testDataNodeAbortsIfNotHttpsOnly

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void testDataNodeAbortsIfNotHttpsOnly() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig("authentication");
  clusterConf.set(DFS_HTTP_POLICY_KEY,
    HttpConfig.Policy.HTTP_AND_HTTPS.name());
  exception.expect(RuntimeException.class);
  exception.expectMessage("Cannot start secure DataNode");
  startCluster(clusterConf);
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: TestSaslDataTransfer.java

Example 15: testFailoverWithFencerConfigured

import org.apache.hadoop.hdfs.HdfsConfiguration; //import the package/class the method depends on
@Test
public void testFailoverWithFencerConfigured() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2"));
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: TestDFSHAAdmin.java


Note: the org.apache.hadoop.hdfs.HdfsConfiguration.set examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors, and copyright remains with those authors; consult each project's license before redistributing or using the code, and do not republish without permission.