

Java HdfsConfiguration Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.HdfsConfiguration. If you have been wondering what the HdfsConfiguration class does, how to use it, or what real-world usage looks like, the curated examples below should help.


The HdfsConfiguration class belongs to the org.apache.hadoop.hdfs package. Fifteen code examples of the class are shown below, sorted by popularity by default.
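
Before turning to the collected examples, here is a minimal, self-contained sketch written for this article (it is not taken from any of the projects below). It shows the two constructors that recur throughout: the no-argument form and the copy constructor that wraps an existing Configuration. Instantiating HdfsConfiguration registers hdfs-default.xml and hdfs-site.xml as default resources, which a plain Configuration does not do.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsConfigurationDemo {
  public static void main(String[] args) {
    // No-argument constructor: loads hdfs-default.xml and hdfs-site.xml
    // (when present on the classpath) on top of core-default/core-site.
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.replication", "2");

    // Copy constructor: preserves the settings of an existing Configuration
    // while still ensuring the HDFS resource files are loaded. Examples 1,
    // 2 and 12 below all use this form.
    Configuration wrapped = new HdfsConfiguration(conf);
    System.out.println("dfs.replication = " + wrapped.get("dfs.replication"));
  }
}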

Example 1: doGet

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = new HdfsConfiguration(datanode.getConf());

  try {
    final DFSClient dfs = DatanodeJspHelper.getDFSClient(request, 
        datanode, conf, getUGI(request, conf));
    final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
    writeXml(e, path, xml);
  }
  xml.endDocument();
}
 
Source: naver/hadoop, FileChecksumServlets.java, 26 lines

Example 2: createNameNodeProxy

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // If we are running inside the NameNode, use it directly rather than via RPC.
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
    NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
      ClientProtocol.class).getProxy();
}
 
Source: naver/hadoop, DfsServlet.java, 19 lines
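
A brief, hypothetical usage sketch of the method above (assumed for illustration, not taken from DfsServlet): the returned ClientProtocol proxy exposes the NameNode RPC interface directly, so a servlet can, for instance, look up path metadata.

// Hypothetical caller inside the same servlet class.
ClientProtocol nn = createNameNodeProxy();
HdfsFileStatus status = nn.getFileInfo("/user"); // null if the path does not exist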

Example 3: testAuditLogger

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Tests that AuditLogger works as expected.
 */
@Test
public void testAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    assertEquals(1, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Source: naver/hadoop, TestAuditLogger.java, 24 lines

Example 4: testDisableTopAuditLogger

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Tests that TopAuditLogger can be disabled
 */
@Test
public void testDisableTopAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(NNTOP_ENABLED_KEY, false);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    List<AuditLogger> auditLoggers =
        cluster.getNameNode().getNamesystem().getAuditLoggers();
    for (AuditLogger auditLogger : auditLoggers) {
      assertFalse(
          "top audit logger is still hooked in after it is disabled",
          auditLogger instanceof TopAuditLogger);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Source: naver/hadoop, TestAuditLogger.java, 23 lines

Example 5: startUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Starts an instance of DataNode.
 * @throws IOException
 * @throws URISyntaxException
 */
@Before
public void startUp() throws IOException, URISyntaxException {
  tearDownDone = false;
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
  conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  FileSystem.setDefaultUri(conf,
      "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
  File dataDir = new File(DATA_DIR);
  FileUtil.fullyDelete(dataDir);
  dataDir.mkdirs();
  StorageLocation location = StorageLocation.parse(dataDir.getPath());
  locations.add(location);
}
 
Source: naver/hadoop, TestDatanodeProtocolRetryPolicy.java, 22 lines

Example 6: setUp

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER),
      "group1");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  configureSuperUserIPAddresses(config, REAL_USER);
  FileSystem.setDefaultUri(config, "hdfs://localhost:0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  ugi = UserGroupInformation.createRemoteUser(REAL_USER);
  proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
      GROUP_NAMES);
}
 
Source: naver/hadoop, TestDelegationTokenForProxyUser.java, 23 lines

Example 7: createSecureConfig

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
 
Source: naver/hadoop, SaslDataTransferTestCase.java, 29 lines
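
A hypothetical usage sketch of the helper above (assumed, not part of SaslDataTransferTestCase): the dataTransferProtection argument is a comma-separated list of SASL QOPs, where "authentication", "integrity" and "privacy" are the accepted values.

// Hypothetical caller in a subclass of SaslDataTransferTestCase; the QOP
// list requests authentication-only as well as full privacy protection.
HdfsConfiguration secureConf = createSecureConfig("authentication,privacy");
MiniDFSCluster secureCluster = new MiniDFSCluster.Builder(secureConf).build();
secureCluster.waitActive();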

Example 8: getConf

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
private Configuration getConf() {
  Configuration conf = new HdfsConfiguration();

  // Lower the heart beat interval so the NN quickly learns of dead
  // or decommissioned DNs and the NN issues replication and invalidation
  // commands quickly (as replies to heartbeats)
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

  // Have the NN ReplicationMonitor compute the replication and
  // invalidation commands to send DNs every second.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);

  // Have the NN check for pending replications every second so it
  // quickly schedules additional replicas as they are identified.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);

  // The DNs report blocks every second.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);

  // Indicates we have multiple racks
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
  return conf;
}
 
Source: naver/hadoop, TestHostsFiles.java, 24 lines
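
As a hedged illustration of how such a tuned configuration is typically consumed (assumed usage, not taken from TestHostsFiles): the shortened intervals make a MiniDFSCluster notice dead or decommissioned DataNodes within seconds rather than minutes.

// Hypothetical usage in the same test class: build a two-rack mini cluster
// from the tuned configuration; the racks array must match numDataNodes.
Configuration conf = getConf();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(2)
    .racks(new String[]{"/rack1", "/rack2"})
    .build();
cluster.waitActive();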

Example 9: testInvalidateOverReplicatedBlock

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Tests that an over-replicated block is invalidated when the replication
 * factor is decreased for a partially written block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Source: naver/hadoop, TestOverReplicatedBlocks.java, 27 lines

Example 10: testFinalizedReplicas

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@Test public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
 
Source: naver/hadoop, TestDatanodeRestart.java, 24 lines

Example 11: testFSNamespaceClearLeases

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Test that FSNamesystem#clear clears all leases.
 */
@Test
public void testFSNamespaceClearLeases() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  LeaseManager leaseMan = fsn.getLeaseManager();
  leaseMan.addLease("client1", "importantFile");
  assertEquals(1, leaseMan.countLease());
  fsn.clear();
  leaseMan = fsn.getLeaseManager();
  assertEquals(0, leaseMan.countLease());
}
 
Source: naver/hadoop, TestFSNamesystem.java, 20 lines

Example 12: startBackupNode

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
BackupNode startBackupNode(Configuration conf,
                           StartupOption startupOpt,
                           int idx) throws IOException {
  Configuration c = new HdfsConfiguration(conf);
  String dirs = getBackupNodeDir(startupOpt, idx);
  c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
  c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
      "127.0.0.1:0");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
          "127.0.0.1:0");

  BackupNode bn = (BackupNode)NameNode.createNameNode(
      new String[]{startupOpt.getName()}, c);
  assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
  assertTrue(bn.getRole() + " must be in StandbyState",
             bn.getNamesystem().getHAState()
               .equalsIgnoreCase(HAServiceState.STANDBY.name()));
  return bn;
}
 
Source: naver/hadoop, TestBackupNode.java, 22 lines

Example 13: testClose

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
@Test
public void testClose() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    
    ExtendedBlock[] blocks = setup(bpid, dataSet);

    // test close
    testClose(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
 
Source: naver/hadoop, TestWriteToReplica.java, 21 lines

Example 14: setup

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Source: naver/hadoop, TestNameNodeRetryCacheMetrics.java, 17 lines

Example 15: testPreTxidEditLogWithEdits

import org.apache.hadoop.hdfs.HdfsConfiguration; // import the required package/class
/**
 * Test case for loading a very simple edit log from a format
 * prior to the inclusion of edit transaction IDs in the log.
 */
@Test
public void testPreTxidEditLogWithEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();

    long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
    assertEquals(3, numEdits);
    // Sanity check the edit
    HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
    assertEquals("supergroup", fileInfo.getGroup());
    assertEquals(3, fileInfo.getReplication());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Source: naver/hadoop, TestEditLog.java, 25 lines


Note: The org.apache.hadoop.hdfs.HdfsConfiguration class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright in the source code. Please consult each project's License before distributing or using the code, and do not reproduce without permission.