

Java StartupOption.getName Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.getName. If you have been wondering what StartupOption.getName does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.


Seven code examples of the StartupOption.getName method are shown below, ordered by popularity.
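
Before the examples, a minimal sketch of what getName itself does (not taken from this page's sources): StartupOption is the enum of HDFS startup modes, and getName() returns the command-line flag registered for each mode, which is why the examples below can pass it straight to NameNode.createNameNode. The flag strings in the comment are the conventional ones and may vary by Hadoop version.

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;

public class StartupOptionDemo {
  public static void main(String[] args) {
    // Prints each mode next to its CLI flag, conventionally
    // FORMAT -> "-format", REGULAR -> "-regular", UPGRADE -> "-upgrade".
    for (StartupOption opt : new StartupOption[] {
        StartupOption.FORMAT, StartupOption.REGULAR, StartupOption.UPGRADE }) {
      System.out.println(opt + " -> " + opt.getName());
    }
  }
}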

Example 1: startNameNode

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the class this method depends on
NameNode startNameNode( Configuration conf,
                        String imageDirs,
                        String editsDirs,
                        StartupOption start) throws IOException {
  conf.set("fs.default.name", "hdfs://localhost:0");
  conf.set("dfs.http.address", "0.0.0.0:0");  
  conf.set("dfs.name.dir", imageDirs);
  conf.set("dfs.name.edits.dir", editsDirs);
  String[] args = new String[]{start.getName()};
  NameNode nn = NameNode.createNameNode(args, conf);
  return nn;
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 13 | Source: TestCheckpoint.java
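
A hedged call-site sketch (assumed, not part of the original test): the helper turns the chosen startup mode into the single argument that NameNode.createNameNode parses; the directory paths here are placeholders.

Configuration conf = new Configuration();
// getName() yields e.g. "-regular", which createNameNode parses back
// into the matching StartupOption before starting the NameNode.
NameNode nn = startNameNode(conf, "/tmp/dfs/name", "/tmp/dfs/edits",
                            StartupOption.REGULAR);
nn.stop();  // release ports and threads when the test is done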

Example 2: startNameNode

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the class this method depends on
NameNode startNameNode( Configuration conf,
                        String imageDirs,
                        String editsDirs,
                        StartupOption start) throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, imageDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDirs);
  String[] args = new String[]{start.getName()};
  NameNode nn = NameNode.createNameNode(args, conf);
  Assert.assertTrue(nn.isInSafeMode());
  return nn;
}
 
Developer: Seagate | Project: hadoop-on-lustre | Lines: 14 | Source: TestCheckPointForSecurityTokens.java

Example 3: startNameNode

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the class this method depends on
NameNode startNameNode( Configuration conf,
                        String imageDirs,
                        String editsDirs,
                        StartupOption start) throws IOException {
  conf.set("fs.default.name", "hdfs://localhost:0");
  conf.set("dfs.http.address", "0.0.0.0:0");  
  conf.set("dfs.name.dir", imageDirs);
  conf.set("dfs.name.edits.dir", editsDirs);
  String[] args = new String[]{start.getName()};
  NameNode nn = NameNode.createNameNode(args, conf);
  assertTrue(nn.isInSafeMode());
  return nn;
}
 
Developer: Seagate | Project: hadoop-on-lustre | Lines: 14 | Source: TestCheckpoint.java

Example 4: startNameNode

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the class this method depends on
NameNode startNameNode( Configuration conf,
                        String imageDirs,
                        String editsDirs,
                        StartupOption start) throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, imageDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDirs);
  String[] args = new String[]{start.getName()};
  NameNode nn = NameNode.createNameNode(args, conf);
  assertTrue(nn.isInSafeMode());
  return nn;
}
 
Developer: cumulusyebl | Project: cumulus | Lines: 14 | Source: TestCheckpoint.java

Example 5: MiniDFSCluster

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the class this method depends on
/**
 * NOTE: if possible, the other constructors that don't have nameNode port 
 * parameter should be used as they will ensure that the servers use free ports.
 * <p>
 * Modify the config and start up the servers.  
 * 
 * @param nameNodePort suggestion for which rpc port to use.  caller should
 *          use getNameNodePort() to get the actual port used.
 * @param conf the base configuration to use in starting the servers.  This
 *          will be modified as necessary.
 * @param numDataNodes Number of DataNodes to start; may be zero
 * @param format if true, format the NameNode and DataNodes before starting up
 * @param manageNameDfsDirs if true, the data directories for servers will be
 *          created and dfs.name.dir and dfs.data.dir will be set in the conf
 * @param manageDataDfsDirs if true, the data directories for datanodes will
 *          be created and dfs.data.dir set to same in the conf
 * @param operation the operation with which to start the servers.  If null
 *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks array of strings indicating the rack that each DataNode is on
 * @param hosts array of strings indicating the hostnames of each DataNode
 * @param simulatedCapacities array of capacities of the simulated data nodes
 */
public MiniDFSCluster(int nameNodePort, 
                      Configuration conf,
                      int numDataNodes,
                      boolean format,
                      boolean manageNameDfsDirs,
                      boolean manageDataDfsDirs,
                      StartupOption operation,
                      String[] racks, String hosts[],
                      long[] simulatedCapacities) throws IOException {
  this.conf = conf;
  base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
  data_dir = new File(base_dir, "data");
  
  // Setup the NameNode configuration
  FileSystem.setDefaultUri(conf, "hdfs://localhost:"+ Integer.toString(nameNodePort));
  conf.set("dfs.http.address", "127.0.0.1:0");  
  if (manageNameDfsDirs) {
    conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+
             new File(base_dir, "name2").getPath());
    conf.set("fs.checkpoint.dir", new File(base_dir, "namesecondary1").
              getPath()+"," + new File(base_dir, "namesecondary2").getPath());
  }
  
  int replication = conf.getInt("dfs.replication", 3);
  conf.setInt("dfs.replication", Math.min(replication, numDataNodes));
  int safemodeExtension = conf.getInt("dfs.safemode.extension.testing", 0);
  conf.setInt("dfs.safemode.extension", safemodeExtension);
  conf.setInt("dfs.namenode.decommission.interval", 3); // 3 second

  // Set a small delay on blockReceived in the minicluster to approximate
  // a real cluster a little better and suss out bugs.
  conf.setInt("dfs.datanode.artificialBlockReceivedDelay", 5);
  
  // Format and clean out DataNode directories
  if (format) {
    if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
      throw new IOException("Cannot remove data directory: " + data_dir);
    }
    NameNode.format(conf); 
  }
  
  // Start the NameNode
  String[] args = (operation == null ||
                   operation == StartupOption.FORMAT ||
                   operation == StartupOption.REGULAR) ?
    new String[] {} : new String[] {operation.getName()};
  conf.setClass("topology.node.switch.mapping.impl", 
                 StaticMapping.class, DNSToSwitchMapping.class);
  nameNode = NameNode.createNameNode(args, conf);
  
  // Start the DataNodes
  startDataNodes(conf, numDataNodes, manageDataDfsDirs, 
                  operation, racks, hosts, simulatedCapacities);
  waitClusterUp();
}
 
Developer: Seagate | Project: hadoop-on-lustre | Lines: 78 | Source: MiniDFSCluster.java
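
A usage sketch for this constructor (assumed, not from the MiniDFSCluster sources): per the javadoc above, passing StartupOption.REGULAR (or null) selects a normal start, and a NameNode port of 0 lets the OS pick a free one.

Configuration conf = new Configuration();
// 2 DataNodes, format first, let the cluster manage name/data dirs;
// null racks/hosts/capacities accept the defaults.
MiniDFSCluster cluster = new MiniDFSCluster(0, conf, 2, true, true, true,
    StartupOption.REGULAR, null, null, null);
try {
  FileSystem fs = cluster.getFileSystem();
  // ... exercise HDFS through fs ...
} finally {
  cluster.shutdown();
}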

Example 6: initMiniDFSCluster

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the class this method depends on
private void initMiniDFSCluster(int nameNodePort, Configuration conf,
    int numDataNodes, boolean format, boolean manageNameDfsDirs,
    boolean manageDataDfsDirs, StartupOption operation, String[] racks,
    String[] hosts, long[] simulatedCapacities) throws IOException {
  this.conf = conf;
  base_dir = new File(getBaseDirectory());
  data_dir = new File(base_dir, "data");
  
  // use alternate RPC engine if spec'd
  String rpcEngineName = System.getProperty("hdfs.rpc.engine");
  if (rpcEngineName != null && !"".equals(rpcEngineName)) {
    
    System.out.println("HDFS using RPCEngine: "+rpcEngineName);
    try {
      Class<?> rpcEngine = conf.getClassByName(rpcEngineName);
      setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
      setRpcEngine(conf, NamenodeProtocol.class, rpcEngine);
      setRpcEngine(conf, ClientProtocol.class, rpcEngine);
      setRpcEngine(conf, DatanodeProtocol.class, rpcEngine);
      setRpcEngine(conf, RefreshAuthorizationPolicyProtocol.class, rpcEngine);
      setRpcEngine(conf, RefreshUserMappingsProtocol.class, rpcEngine);
    } catch (ClassNotFoundException e) {
      throw new RuntimeException(e);
    }

    // disable service authorization, as it does not work with tunnelled RPC
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
                    false);
  }

  // Setup the NameNode configuration
  FileSystem.setDefaultUri(conf, "hdfs://localhost:"+ Integer.toString(nameNodePort));
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");  
  if (manageNameDfsDirs) {
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        fileAsURI(new File(base_dir, "name1"))+","+
        fileAsURI(new File(base_dir, "name2")));
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
        fileAsURI(new File(base_dir, "namesecondary1"))+","+
        fileAsURI(new File(base_dir, "namesecondary2")));
  }
  
  int replication = conf.getInt("dfs.replication", 3);
  conf.setInt("dfs.replication", Math.min(replication, numDataNodes));
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
  conf.setInt("dfs.namenode.decommission.interval", 3); // 3 second
  
  // Format and clean out DataNode directories
  if (format) {
    if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
      throw new IOException("Cannot remove data directory: " + data_dir);
    }
    NameNode.format(conf); 
  }
  
  // Start the NameNode
  String[] args = (operation == null ||
                   operation == StartupOption.FORMAT ||
                   operation == StartupOption.REGULAR) ?
    new String[] {} : new String[] {operation.getName()};
  conf.setClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                 StaticMapping.class, DNSToSwitchMapping.class);
  nameNode = NameNode.createNameNode(args, conf);
  
  // Start the DataNodes
  startDataNodes(conf, numDataNodes, manageDataDfsDirs, 
                  operation, racks, hosts, simulatedCapacities);
  waitClusterUp();

  //make sure ProxyUsers uses the latest conf
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
 
Developer: cumulusyebl | Project: cumulus | Lines: 73 | Source: MiniDFSCluster.java

Example 7: getBackupNodeDir

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the class this method depends on
static String getBackupNodeDir(StartupOption t, int i) {
  return BASE_DIR + "name" + t.getName() + i + "/";
}
 
Developer: cumulusyebl | Project: cumulus | Lines: 4 | Source: TestBackupNode.java
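
A hedged illustration of why getName appears in the path (assumed calls, with BASE_DIR as defined elsewhere in TestBackupNode): embedding the option name plus an index keeps the storage directories of multiple backup/checkpoint nodes disjoint.

// Conventionally StartupOption.BACKUP.getName() is "-backup" (version-
// dependent), so this returns something like BASE_DIR + "name-backup0/".
String backupDir = getBackupNodeDir(StartupOption.BACKUP, 0);
String checkpointDir = getBackupNodeDir(StartupOption.CHECKPOINT, 1);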


Note: The org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.getName examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.