

Java DFSConfigKeys Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.DFSConfigKeys. If you are wondering what the DFSConfigKeys class does, how to use it, or what working examples look like, the curated code samples below should help.


The DFSConfigKeys class belongs to the org.apache.hadoop.hdfs package. Fifteen code examples of the class are shown below, sorted by popularity.
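DFSConfigKeys itself is essentially a catalog of String constants, one per dfs.* configuration key, most paired with a companion *_DEFAULT constant. Every example below follows the same pattern: set or read those keys on a Hadoop Configuration before handing it to an HDFS client or cluster. As a minimal sketch (the demo class here is hypothetical; the two constants are real):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DfsConfigKeysDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Override the HDFS block size before handing the conf to a client.
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 64L * 1024 * 1024);
    // Read it back, falling back to the companion default constant.
    long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
    System.out.println("dfs.blocksize = " + blockSize);
  }
}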

Example 1: init

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
/**
 * Initialize the log output stream: lower the HDFS write packet size and
 * checksum chunk size to the configured flush threshold, then create the
 * output file under the configured log path.
 * @throws IOException if the log path is unset or the file cannot be created
 */
public void init() throws IOException {
  int flushLen = conf.getInt(AngelConf.ANGEL_LOG_FLUSH_MIN_SIZE, AngelConf.DEFAULT_ANGEL_LOG_FLUSH_MIN_SIZE);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, flushLen);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, flushLen);

  String pathStr = conf.get(AngelConf.ANGEL_LOG_PATH);
  if (pathStr == null) {
    throw new IOException("log directory is null. you must set " + AngelConf.ANGEL_LOG_PATH);
  }

  LOG.info("algorithm log output directory=" + pathStr);

  Path path = new Path(pathStr + "/log");
  FileSystem fs = path.getFileSystem(conf);
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
  outputStream = fs.create(path, true);
}
 
Developer: Tencent, Project: angel, Lines: 24, Source: DistributeLog.java
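A note on the two keys above: an HDFS write packet carries whole checksum chunks, so setting both DFS_CLIENT_WRITE_PACKET_SIZE_KEY and DFS_BYTES_PER_CHECKSUM_KEY to the flush threshold presumably ensures a packet fills, and is shipped to the datanodes, after roughly flushLen bytes of log output instead of buffering up to the default 64 KB packet size.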

Example 2: testConcat

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
private void testConcat() throws Exception {
  Configuration config = getProxiedFSConf();
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(config);
    fs.mkdirs(getProxiedFSTestDir());
    Path path1 = new Path("/test/foo.txt");
    Path path2 = new Path("/test/bar.txt");
    Path path3 = new Path("/test/derp.txt");
    DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
    fs.close();
    fs = getHttpFSFileSystem();
    fs.concat(path1, new Path[]{path2, path3});
    fs.close();
    fs = FileSystem.get(config);
    Assert.assertTrue(fs.exists(path1));
    Assert.assertFalse(fs.exists(path2));
    Assert.assertFalse(fs.exists(path3));
    fs.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: BaseTestHttpFSWith.java

Example 3: setUp

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER),
      "group1");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  configureSuperUserIPAddresses(config, REAL_USER);
  FileSystem.setDefaultUri(config, "hdfs://localhost:0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  ugi = UserGroupInformation.createRemoteUser(REAL_USER);
  proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
      GROUP_NAMES);
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestDelegationTokenForProxyUser.java
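The tiny token lifetimes here (10 s max lifetime, 5 s renew interval) let the test exercise expiry and renewal quickly, and DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY is a test-only switch that enables the delegation token secret manager even though security is otherwise disabled.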

Example 4: genClientWithDummyHandler

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestRetryCacheWithHA.java

Example 5: testGetConfiguration

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
@Test
public void testGetConfiguration() throws ServletException {
  AuthFilter filter = new AuthFilter();
  Map<String, String> m = new HashMap<String,String>();
  m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
      "xyz/[email protected]");
  m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
      "thekeytab");
  FilterConfig config = new DummyFilterConfig(m);
  Properties p = filter.getConfiguration("random", config);
  Assert.assertEquals("xyz/[email protected]",
      p.getProperty("kerberos.principal"));
  Assert.assertEquals("thekeytab", p.getProperty("kerberos.keytab"));
  Assert.assertEquals("true",
      p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestAuthFilter.java

Example 6: getLoggerAddresses

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
private static List<InetSocketAddress> getLoggerAddresses(URI uri)
    throws IOException {
  String authority = uri.getAuthority();
  Preconditions.checkArgument(authority != null && !authority.isEmpty(),
      "URI has no authority: " + uri);
  
  String[] parts = StringUtils.split(authority, ';');
  for (int i = 0; i < parts.length; i++) {
    parts[i] = parts[i].trim();
  }

  if (parts.length % 2 == 0) {
    LOG.warn("Quorum journal URI '" + uri + "' has an even number " +
        "of Journal Nodes specified. This is not recommended!");
  }
  
  List<InetSocketAddress> addrs = Lists.newArrayList();
  for (String addr : parts) {
    addrs.add(NetUtils.createSocketAddr(
        addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT));
  }
  return addrs;
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: QuorumJournalManager.java
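For reference, the authority parsed here comes from a quorum journal URI such as the following (hostnames hypothetical); with no explicit ports, every node falls back to DFS_JOURNALNODE_RPC_PORT_DEFAULT, i.e. 8485:

qjournal://jn1.example.com;jn2.example.com;jn3.example.com/myjournal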

Example 7: IPFailoverProxyProvider

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
public IPFailoverProxyProvider(Configuration conf, URI uri,
    Class<T> xface) {
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface class %s is not a valid NameNode protocol!", xface);
  this.xface = xface;
  this.nameNodeUri = uri;

  this.conf = new Configuration(conf);
  int maxRetries = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      maxRetries);
  
  int maxRetriesOnSocketTimeouts = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      maxRetriesOnSocketTimeouts);
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: IPFailoverProxyProvider.java
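The translation above reflects the IP-failover design: both NameNodes sit behind one virtual address, so there is no second proxy to fail over to, and the HDFS failover retry settings are instead mapped onto the generic IPC connection retry settings of the provider's private Configuration copy.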

Example 8: testFailToStartWithBadConfig

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
@Test(timeout=100000)
public void testFailToStartWithBadConfig() throws Exception {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, "non-absolute-path");
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  assertJNFailsToStart(conf, "should be an absolute path");
  
  // Existing file which is not a directory 
  File existingFile = new File(TEST_BUILD_DATA, "testjournalnodefile");
  assertTrue(existingFile.createNewFile());
  try {
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
        existingFile.getAbsolutePath());
    assertJNFailsToStart(conf, "Not a directory");
  } finally {
    existingFile.delete();
  }
  
  // Directory which cannot be created
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
      Shell.WINDOWS ? "\\\\cannotBeCreated" : "/proc/does-not-exist");
  assertJNFailsToStart(conf, "Cannot create directory");
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestJournalNode.java

Example 9: setUp

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
      .createJournalURI("/bootstrapStandby").toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      SlowCodec.class.getCanonicalName());
  CompressionCodecFactory.setCodecClasses(conf,
      ImmutableList.<Class> of(SlowCodec.class));
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
          new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
          new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
      .numDataNodes(1).manageNameDfsSharedDirs(false).build();
  cluster.waitActive();
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestBootstrapStandbyWithBKJM.java

Example 10: testParentDirectoryNameIsCorrect

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
/**
 * Verifies that the error string contains the right parent directory
 * name when an operation fails with PathComponentTooLongException.
 */
@Test
public void testParentDirectoryNameIsCorrect() throws Exception {
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 20);
  mkdirs("/user", null);
  mkdirs("/user/testHome", null);
  mkdirs("/user/testHome/FileNameLength", null);

  mkdirCheckParentDirectory(
    "/user/testHome/FileNameLength/really_big_name_0003_fail",
    "/user/testHome/FileNameLength", PathComponentTooLongException.class);

  renameCheckParentDirectory("/user/testHome/FileNameLength",
    "/user/testHome/really_big_name_0003_fail", "/user/testHome/",
    PathComponentTooLongException.class);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestFsLimits.java

Example 11: initCluster

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestDataNodeVolumeFailureReporting.java

Example 12: FSImage

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
/**
 * Construct the FSImage. Set the default checkpoint directories.
 *
 * Setup storage and initialize the edit log.
 *
 * @param conf Configuration
 * @param imageDirs Directories the image can be stored in.
 * @param editsDirs Directories the editlog can be stored in.
 * @throws IOException if directories are invalid.
 */
protected FSImage(Configuration conf,
                  Collection<URI> imageDirs,
                  List<URI> editsDirs)
    throws IOException {
  this.conf = conf;

  storage = new NNStorage(conf, imageDirs, editsDirs);
  if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
                     DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
    storage.setRestoreFailedStorage(true);
  }

  this.editLog = new FSEditLog(conf, storage, editsDirs);
  
  archivalManager = new NNStorageRetentionManager(conf, storage, editLog);
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: FSImage.java
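DFS_NAMENODE_NAME_DIR_RESTORE_KEY corresponds to dfs.namenode.name.dir.restore; when set to true, the NameNode attempts to bring previously failed storage directories back into service at checkpoint time, which is why the constructor flips setRestoreFailedStorage(true) on the NNStorage.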

Example 13: testStartStop

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
@Test
public void testStartStop() throws IOException {
  Configuration conf = new Configuration();
  MiniJournalCluster c = new MiniJournalCluster.Builder(conf)
    .build();
  try {
    URI uri = c.getQuorumJournalURI("myjournal");
    String[] addrs = uri.getAuthority().split(";");
    assertEquals(3, addrs.length);
    
    JournalNode node = c.getJournalNode(0);
    String dir = node.getConf().get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY);
    assertEquals(
        new File(MiniDFSCluster.getBaseDirectory() + "journalnode-0")
          .getAbsolutePath(),
        dir);
  } finally {
    c.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestMiniJournalCluster.java

Example 14: testCreateWithNoDN

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
/**
 * Test for catching the "no datanode" IOException thrown when a file
 * is created while no datanode is running.
 */
@Test(timeout=300000)
public void testCreateWithNoDN() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    cluster.waitActive();
    FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    fs.create(new Path("/testnodatanode"));
    Assert.fail("No exception was thrown");
  } catch (IOException ex) {
    GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestWebHDFS.java

Example 15: startUp

import org.apache.hadoop.hdfs.DFSConfigKeys; // import the required package/class
@BeforeClass
public static void startUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf)
      .format(true)
      .numDataNodes(DATANODE_NUM)
      .nameNodePort(NameNode.DEFAULT_PORT)
      .waitSafeMode(true)
      .build();
  fs = cluster.getFileSystem();
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestFileTruncate.java


Note: The org.apache.hadoop.hdfs.DFSConfigKeys class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers; copyright remains with the original authors, and use or redistribution should follow each project's license. Do not reproduce without permission.