

Java NameNode Class Code Examples

This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.NameNode. If you have been wondering what the NameNode class does, or how to use it in practice, the code examples selected here may help.


The NameNode class belongs to the org.apache.hadoop.hdfs.server.namenode package. A total of 15 code examples of the NameNode class are shown below, ordered by popularity by default.
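Before the individual examples, here is a minimal sketch of the entry point many of them share: resolving the NameNode RPC address from a Configuration and opening a DFSClient against it. This demo is illustrative, not one of the collected examples; it assumes a Hadoop 2.x classpath where NameNode.getAddress(Configuration) and the DFSClient(InetSocketAddress, Configuration) constructor exist (the same calls used in Examples 3 and 10 below), and the class name NameNodeAddressDemo is made up for this sketch.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class NameNodeAddressDemo {
  public static void main(String[] args) throws Exception {
    // HdfsConfiguration picks up core-site.xml / hdfs-site.xml from the classpath
    Configuration conf = new HdfsConfiguration();
    // Resolves the RPC address from fs.defaultFS / dfs.namenode.rpc-address
    InetSocketAddress nnAddr = NameNode.getAddress(conf);
    System.out.println("NameNode RPC address: " + nnAddr);
    // Same constructor as Example 3 (RpcProgramMountd) uses
    try (DFSClient client = new DFSClient(nnAddr, conf)) {
      System.out.println("Root status: " + client.getFileInfo("/"));
    }
  }
}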

Example 1: testDfsClientFailover

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);
  
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  
  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestDFSClientFailover.java

Example 2: start

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
@Override
public void start(Object service) {
  NameNode nn = (NameNode)service;
  Configuration conf = null;
  try {
    conf = nn.getConf();
  } catch (NoSuchMethodError ex) {
    LOG.warn("No method getConf() in this NameNode: " + ex);
  }
  try {
    rpcServer = new NuCypherExtRpcServer(conf, nn);
    rpcServer.start();
    LOG.info(toString() + " started");
  } catch (IOException e) {
    LOG.error("Cannot create NuCypherExtRpcServer: " + e);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 20, Source: NuCypherExtServicePlugin.java

Example 3: RpcProgramMountd

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: RpcProgramMountd.java

Example 4: getNNServiceRpcAddresses

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/**
 * Returns the namenode RPC addresses from the configuration, keyed by
 * nameservice ID and then by namenode ID.
 *
 * Returns the namenode address specifically configured for datanodes (the
 * service RPC port), if found. If not, the regular RPC address configured
 * for other clients is returned.
 *
 * @param conf configuration
 * @return map of nameservice ID to a map of namenode ID to InetSocketAddress
 * @throws IOException on error
 */
public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
    Configuration conf) throws IOException {
  // Use default address as fall back
  String defaultAddress;
  try {
    defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }
  
  Map<String, Map<String, InetSocketAddress>> addressList =
    getAddresses(conf, defaultAddress,
      DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList.isEmpty()) {
    throw new IOException("Incorrect configuration: namenode address "
        + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
        + DFS_NAMENODE_RPC_ADDRESS_KEY
        + " is not configured.");
  }
  return addressList;
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: DFSUtil.java
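A hedged sketch of consuming the nested map returned above (a fragment assuming the surrounding imports; the loop and variable names are illustrative, and only the map shape, nameservice ID to namenode ID to address, comes from the method's signature):

Map<String, Map<String, InetSocketAddress>> nnAddrs =
    DFSUtil.getNNServiceRpcAddresses(conf);
for (Map.Entry<String, Map<String, InetSocketAddress>> ns : nnAddrs.entrySet()) {
  for (Map.Entry<String, InetSocketAddress> nn : ns.getValue().entrySet()) {
    // the nameservice ID may be null on a non-federated cluster
    System.out.println(ns.getKey() + "/" + nn.getKey() + " -> " + nn.getValue());
  }
}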

Example 5: startBackupNode

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  
  // Start BackupNode
  String[] args = new String[] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestHDFSServerPorts.java
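One plausible way a test would use this helper (a hedged sketch; the surrounding test fixture and configuration are assumed): start the backup node, exercise it, then shut it down with the stop() method BackupNode inherits from NameNode.

Configuration conf = new HdfsConfiguration();
BackupNode backup = startBackupNode(conf);
try {
  // ... run checkpoint/journal assertions against the backup node here
} finally {
  backup.stop(); // inherited from NameNode
}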

Example 6: saveCurrentTokens

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/**
 * Private helper method to save delegation tokens in the fsimage.
 */
private synchronized void saveCurrentTokens(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(currentTokens.size());
  Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
      .iterator();
  while (iter.hasNext()) {
    DelegationTokenIdentifier id = iter.next();
    id.write(out);
    DelegationTokenInformation info = currentTokens.get(id);
    out.writeLong(info.getRenewDate());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: DelegationTokenSecretManager.java

Example 7: loadAllKeys

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/**
 * Private helper method to load delegation keys from fsimage.
 * @throws IOException on error
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: DelegationTokenSecretManager.java

Example 8: selectToken

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/**
 * Select the delegation token for HDFS. The port will be rewritten to
 * the port of hdfs.service.host_$nnAddr, or the default namenode RPC port.
 * This method should only be called by non-HDFS filesystems that do not
 * use the RPC port to acquire tokens, e.g. webhdfs or hftp.
 * @param nnUri of the remote namenode
 * @param tokens as a collection
 * @param conf hadoop configuration
 * @return Token
 */
public Token<DelegationTokenIdentifier> selectToken(
    final URI nnUri, Collection<Token<?>> tokens,
    final Configuration conf) {
  // This guesses the remote cluster's RPC service port. The current token
  // design assumes it is the same as the local cluster's RPC port unless a
  // config key is set; there should be a way to automatically and correctly
  // determine the value.
  Text serviceName = SecurityUtil.buildTokenService(nnUri);
  final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
  
  int nnRpcPort = NameNode.DEFAULT_PORT;
  if (nnServiceName != null) {
    nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort(); 
  }
  // use original hostname from the uri to avoid unintentional host resolving
  serviceName = SecurityUtil.buildTokenService(
      NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));
  
  return selectToken(serviceName, tokens);
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: DelegationTokenSelector.java
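A hedged caller sketch for the selector above (a fragment assuming the surrounding imports; the URI and the empty token list are illustrative, and in real code the tokens would come from the current UserGroupInformation's credentials):

Configuration conf = new HdfsConfiguration();
URI nnUri = URI.create("webhdfs://nn.example.com:50070");
Collection<Token<?>> tokens = new ArrayList<Token<?>>();
// ... populate e.g. from ugi.getCredentials().getAllTokens()
DelegationTokenSelector selector = new DelegationTokenSelector();
Token<DelegationTokenIdentifier> token = selector.selectToken(nnUri, tokens, conf);
if (token == null) {
  // no matching delegation token; the caller would fall back to Kerberos
}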

Example 9: testSecondaryNodePorts

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/**
 * Verify secondary namenode port usage.
 */
@Test(timeout = 300000)
public void testSecondaryNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();

    // bind http server to the same port as name-node
    Configuration conf2 = new HdfsConfiguration(config);
    conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, 
              config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    LOG.info("= Starting 1 on: " + 
                               conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    boolean started = canStartSecondaryNode(conf2);
    assertFalse(started); // should fail

    // bind http server to a different port
    conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, THIS_HOST);
    LOG.info("= Starting 2 on: " + 
                               conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    started = canStartSecondaryNode(conf2);
    assertTrue(started); // should start now
  } finally {
    stopNameNode(nn);
  }
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestHDFSServerPorts.java

Example 10: getProxy

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
@Override
public synchronized ProxyInfo<T> getProxy() {
  // Create a non-ha proxy if not already created.
  if (nnProxyInfo == null) {
    try {
      // Create a proxy that is not wrapped in RetryProxy
      InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
      nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
          conf, nnAddr, xface, UserGroupInformation.getCurrentUser(), 
          false).getProxy(), nnAddr.toString());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  return nnProxyInfo;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: IPFailoverProxyProvider.java

Example 11: run

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
@Override
public int run(String[] args) throws Exception {
  parseArgs(args);
  parseConfAndFindOtherNN();
  NameNode.checkAllowFormat(conf);

  InetSocketAddress myAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());

  return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
    @Override
    public Integer run() {
      try {
        return doRun();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  });
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: BootstrapStandby.java

Example 12: testReadSnapshotFileWithCheckpoint

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  assertTrue(hdfs.delete(bar, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);

  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestSnapshotBlocksMap.java

Example 13: removeDatanode

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/**
 * Remove a datanode.
 * @throws UnregisteredNodeException 
 */
public void removeDatanode(final DatanodeID node
    ) throws UnregisteredNodeException {
  namesystem.writeLock();
  try {
    final DatanodeDescriptor descriptor = getDatanode(node);
    if (descriptor != null) {
      removeDatanode(descriptor);
    } else {
      NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
                                   + node + " does not exist");
    }
  } finally {
    namesystem.writeUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: DatanodeManager.java

Example 14: removeDeadDatanode

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/** Remove a dead datanode. */
void removeDeadDatanode(final DatanodeID nodeID) {
  synchronized (datanodeMap) {
    DatanodeDescriptor d;
    try {
      d = getDatanode(nodeID);
    } catch (IOException e) {
      d = null;
    }
    if (d != null && isDatanodeDead(d)) {
      NameNode.stateChangeLog.info(
          "BLOCK* removeDeadDatanode: lost heartbeat from " + d);
      removeDatanode(d);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: DatanodeManager.java

Example 15: setGenerationStampAndVerifyReplicas

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the required package/class
/**
 * Process the recorded replicas. When about to commit or finish
 * pipeline recovery, sort out the bad replicas.
 * @param genStamp  The final generation stamp for the block.
 */
public void setGenerationStampAndVerifyReplicas(long genStamp) {
  // Set the generation stamp for the block.
  setGenerationStamp(genStamp);
  if (replicas == null)
    return;

  // Remove the replicas with wrong gen stamp.
  // The replica list is unchanged.
  for (ReplicaUnderConstruction r : replicas) {
    if (genStamp != r.getGenerationStamp()) {
      r.getExpectedStorageLocation().removeBlock(this);
      NameNode.blockStateChangeLog.info("BLOCK* Removing stale replica "
          + "from location: {}", r.getExpectedStorageLocation());
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: BlockInfoContiguousUnderConstruction.java


Note: The org.apache.hadoop.hdfs.server.namenode.NameNode class examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by many developers; copyright belongs to the original authors, and distribution and use are governed by each project's license. Please do not repost without permission.