

Java StartupOption.REGULAR Field Code Examples

This article collects typical usage examples of the Java field org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.REGULAR. If you are wondering what StartupOption.REGULAR does, how to use it, or what real-world usages look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.


The following 15 code examples of StartupOption.REGULAR are ordered by popularity by default.
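
For context: StartupOption is an enum nested in org.apache.hadoop.hdfs.server.common.HdfsConstants that maps each startup mode to its command-line flag; getName(), called throughout the parsers below, returns that flag. The following is a simplified sketch, trimmed to the constants exercised in these examples; the exact set varies by Hadoop version, and forks add their own (e.g. SERVICE in hadoop-EAR, seen in Example 10).

// Simplified sketch of HdfsConstants.StartupOption, trimmed to the constants
// used on this page. Verify against your Hadoop version.
public enum StartupOption {
  FORMAT("-format"),           // format the storage directories before starting
  REGULAR("-regular"),         // normal startup; the default everywhere below
  BACKUP("-backup"),           // run the NameNode as a backup node (newer versions)
  CHECKPOINT("-checkpoint"),   // run the NameNode as a checkpoint node (newer versions)
  UPGRADE("-upgrade"),         // upgrade the on-disk layout, keeping a "previous" copy
  ROLLBACK("-rollback"),       // roll back to the pre-upgrade state
  FINALIZE("-finalize"),       // discard "previous" and finalize an upgrade
  IMPORT("-importCheckpoint"); // import a namespace image from a checkpoint

  private final String name;

  StartupOption(String name) { this.name = name; }

  /** Returns the command-line flag corresponding to this option. */
  public String getName() { return name; }
}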

Example 1: parseArguments

/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
private static boolean parseArguments(String args[],
                                      Configuration conf) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      System.exit(-1);
    } else if ("-rollback".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if ("-regular".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else
      return false;
  }
  setStartupOption(conf, startOpt);
  return true;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 25, Source: AvatarDataNode.java
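
Example 1 records the parsed option in the Configuration via setStartupOption, and the DataNode reads it back during initialization. Those helpers are not shown on this page; below is a minimal sketch, assuming the "dfs.datanode.startup" conf key used by 0.20-era DataNode code (verify against your source tree):

// Sketch of the companion helpers (conf key assumed; see DataNode.java).
static void setStartupOption(Configuration conf, StartupOption opt) {
  conf.set("dfs.datanode.startup", opt.toString());
}

static StartupOption getStartupOption(Configuration conf) {
  // Default to REGULAR when no startup option was recorded.
  return StartupOption.valueOf(conf.get("dfs.datanode.startup",
                                        StartupOption.REGULAR.toString()));
}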

Example 2: parseArguments

private static StartupOption parseArguments(String args[]) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else
      return null;
  }
  return startOpt;
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 22, Source: NameNode.java
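
Unlike the DataNode variant in Example 1, this parser signals an unrecognized flag by returning null rather than false. A plausible sketch of the calling side, modeled on NameNode.createNameNode (printUsage and the dispatch are assumed, not shown on this page):

// Sketch of the caller (shape assumed; see NameNode.createNameNode).
public static NameNode createNameNode(String argv[], Configuration conf)
    throws IOException {
  if (conf == null) {
    conf = new Configuration();
  }
  StartupOption startOpt = parseArguments(argv);
  if (startOpt == null) {
    printUsage();   // unrecognized flag: print usage and give up
    return null;
  }
  setStartupOption(conf, startOpt);
  // ... handle FORMAT/FINALIZE specially, otherwise construct the NameNode
  return new NameNode(conf);
}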

Example 3: parseArguments

/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
private static boolean parseArguments(String args[], 
                                      Configuration conf) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      System.exit(-1);
    } else if ("-rollback".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if ("-regular".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else
      return false;
  }
  setStartupOption(conf, startOpt);
  return true;
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 25, Source: DataNode.java

Example 4: parseArguments

private static StartupOption parseArguments(String args[]) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.BACKUP;
    } else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.CHECKPOINT;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else
      return null;
  }
  return startOpt;
}
 
Developer: cumulusyebl, Project: cumulus, Lines: 26, Source: NameNode.java

Example 5: testNNStorageStates

/**
 * This test iterates over the testCases table and attempts
 * to startup the NameNode normally.
 */
public void testNNStorageStates() throws Exception {
  String[] baseDirs;

  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);      
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    for (int i = 0; i < NUM_NN_TEST_CASES; i++) {
      boolean[] testCase = testCases[i];
      boolean shouldRecover = testCase[5];
      boolean curAfterRecover = testCase[6];
      boolean prevAfterRecover = testCase[7];

      log("NAME_NODE recovery", numDirs, i, testCase);
      baseDirs = createStorageState(NAME_NODE, testCase);
      if (shouldRecover) {
        cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
        checkResult(NAME_NODE, baseDirs, curAfterRecover, prevAfterRecover);
        cluster.shutdown();
      } else {
        try {
          cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
          throw new AssertionError("NameNode should have failed to start");
        } catch (IOException expected) {
          // the exception is expected
          // check that the message says "not formatted" 
          // when storage directory is empty (case #5)
          if(!testCases[i][0] && !testCases[i][2] 
                && !testCases[i][1] && !testCases[i][3] && !testCases[i][4]) {
            assertTrue(expected.getLocalizedMessage().contains(
                "NameNode is not formatted"));
          }
        }
      }
      cluster.shutdown();
    } // end testCases loop
  } // end numDirs loop
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 42, Source: TestDFSStorageStateRecovery.java

Example 6: testDNStorageStates

/**
 * This test iterates over the testCases table and attempts
 * to startup the DataNode normally.
 */
public void testDNStorageStates() throws Exception {
  String[] baseDirs;

  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);      
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    for (int i = 0; i < NUM_DN_TEST_CASES; i++) {
      boolean[] testCase = testCases[i];
      boolean shouldRecover = testCase[5];
      boolean curAfterRecover = testCase[6];
      boolean prevAfterRecover = testCase[7];

      log("DATA_NODE recovery", numDirs, i, testCase);
      createStorageState(NAME_NODE,
                         new boolean[] {true, true, false, false, false});
      cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
      baseDirs = createStorageState(DATA_NODE, testCase);
      if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
        // DataNode will create and format current if no directories exist
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      } else {
        if (shouldRecover) {
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          checkResult(DATA_NODE, baseDirs, curAfterRecover, prevAfterRecover);
        } else {
          try {
            cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
            throw new AssertionError("DataNode should have failed to start");
          } catch (Exception expected) {
            // expected
          }
        }
      }
      cluster.shutdown();
    } // end testCases loop
  } // end numDirs loop
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 42, Source: TestDFSStorageStateRecovery.java

Example 7: testFinalize

/**
 * This test attempts to finalize the NameNode and DataNode.
 */
public void testFinalize() throws Exception {
  UpgradeUtilities.initialize();
  
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    /* This test requires that the "current" directory not change after
     * the upgrade. Strictly speaking it is fine for those contents to change;
     * block verification is disabled for now so that they do not.
     */
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    
    log("Finalize with existing previous dir", numDirs);
    UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
    UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
    UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
    UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
    cluster = new MiniDFSCluster(conf, 1, StartupOption.REGULAR);
    cluster.finalizeCluster(conf);
    checkResult(nameNodeDirs, dataNodeDirs);

    log("Finalize without existing previous dir", numDirs);
    cluster.finalizeCluster(conf);
    checkResult(nameNodeDirs, dataNodeDirs);

    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
  } // end numDir loop
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 36, Source: TestDFSFinalize.java

Example 8: testVersions

/**
 * This test ensures the appropriate response (successful or failure) from 
 * a Datanode when the system is started with differing version combinations. 
 * <pre>
 * For each 3-tuple in the cross product
 *   ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
 *    {currentNamespaceId,incorrectNamespaceId},
 *    {pastFsscTime,currentFsscTime,futureFsscTime})
 *      1. Start up the Namenode with a version file containing
 *         (currentLayoutVersion,currentNamespaceId,currentFsscTime)
 *      2. Attempt to start up the Datanode with a version file containing
 *         this iteration's version 3-tuple
 * </pre>
 */
public void testVersions() throws Exception {
  UpgradeUtilities.initialize();
  Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
                                                    new Configuration());
  StorageInfo[] versions = initializeVersions();
  UpgradeUtilities.createStorageDirs(
      NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
  cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
  StorageInfo nameNodeVersion = new StorageInfo(
      UpgradeUtilities.getCurrentLayoutVersion(),
      UpgradeUtilities.getCurrentNamespaceID(cluster),
      UpgradeUtilities.getCurrentFsscTime(cluster));
  log("NameNode version info", NAME_NODE, null, nameNodeVersion);
  int namespaceId = cluster.getNameNode().getNamespaceID();
  for (int i = 0; i < versions.length; i++) {
    File[] storage = UpgradeUtilities.createStorageDirs(
        DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
    log("DataNode version info", DATA_NODE, i, versions[i]);
    UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i], namespaceId);
    try {
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    } catch (Exception ignore) {
      // Ignore.  The asserts below will check for problems.
      // ignore.printStackTrace();
    }
    assertTrue(cluster.getNameNode() != null);
    assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
                 cluster.isDataNodeUp());
    cluster.shutdownDataNodes();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 45, Source: TestDFSStartupVersions.java

Example 9: loadFSImage

void loadFSImage(StartupOption startOpt, Configuration conf) 
    throws IOException {
  // format before starting up if requested
  if (startOpt == StartupOption.FORMAT) {
    fsImage.format();
    startOpt = StartupOption.REGULAR;
  }
  try {
    boolean saveNamespace =
        fsImage.recoverTransitionRead(startOpt);
    if (saveNamespace) {
      fsImage.saveNamespace();
    }
    if (conf.getBoolean("dfs.namenode.openlog", true)) {
      fsImage.openEditLog();
    }
  } catch (IOException e) {
    NameNode.LOG.fatal("Exception when loading the image,", e);
    fsImage.close();
    throw e;
  }
  writeLock();
  try {
    this.ready = true;
    this.nameCache.initialized();
    cond.signalAll();
  } finally {
    writeUnlock();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 30, Source: FSDirectory.java
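
The FORMAT branch above shows a pattern that recurs on this page: once formatting is done, the option is downgraded to REGULAR so the rest of startup proceeds normally. How the option reaches loadFSImage is not shown here; a plausible wiring on the NameNode side, assuming a "dfs.namenode.startup" conf key symmetric to the DataNode helper sketched after Example 1:

// Sketch (names assumed): the option recorded by parseArguments is read back
// from the Configuration and handed to FSDirectory.loadFSImage at startup.
StartupOption startOpt = StartupOption.valueOf(
    conf.get("dfs.namenode.startup", StartupOption.REGULAR.toString()));
fsDirectory.loadFSImage(startOpt, conf);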

Example 10: parseArguments

private static StartupOptionAndService parseArguments(String args[]) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  String serviceName = null;
  boolean failOnTxIdMismatch = true;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if (StartupOption.SERVICE.getName().equalsIgnoreCase(cmd)) {
      if (++i < argsLen) {
        serviceName = args[i];
      } else {
        return null;
      }
    } else if (StartupOption.IGNORETXIDMISMATCH.getName().equalsIgnoreCase(cmd)) {
      failOnTxIdMismatch = false;
    } else if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else {
      return null;
    }
  }
  return new StartupOptionAndService(startOpt, serviceName,
      failOnTxIdMismatch);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 34, Source: NameNode.java

Example 11: parseArguments

/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
private static boolean parseArguments(String args[],
                                      Configuration conf) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      System.exit(-1);
    } else if ("-rollback".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if ("-regular".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if ("-d".equalsIgnoreCase(cmd)) {
      ++i;
      if(i >= argsLen) {
        LOG.error("-D option requires following argument.");
        System.exit(-1);
      }
      String[] keyval = args[i].split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1]);
      } else {
        LOG.error("-D option invalid (expected =): " + args[i]);
        System.exit(-1);
      }
    } else
      return false;
  }
  setStartupOption(conf, startOpt);
  return true;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 38, Source: DataNode.java

Example 12: testVersions

/**
 * This test ensures the appropriate response (successful or failure) from 
 * a Datanode when the system is started with differing version combinations. 
 * <pre>
 * For each 3-tuple in the cross product
 *   ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
 *    {currentNamespaceId,incorrectNamespaceId},
 *    {pastFsscTime,currentFsscTime,futureFsscTime})
 *      1. Start up the Namenode with a version file containing
 *         (currentLayoutVersion,currentNamespaceId,currentFsscTime)
 *      2. Attempt to start up the Datanode with a version file containing
 *         this iteration's version 3-tuple
 * </pre>
 */
public void testVersions() throws Exception {
  UpgradeUtilities.initialize();
  Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
                                                    new Configuration());
  StorageInfo[] versions = initializeVersions();
  UpgradeUtilities.createStorageDirs(
      NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
  cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
  StorageInfo nameNodeVersion = new StorageInfo(
      UpgradeUtilities.getCurrentLayoutVersion(),
      UpgradeUtilities.getCurrentNamespaceID(cluster),
      UpgradeUtilities.getCurrentFsscTime(cluster));
  log("NameNode version info", NAME_NODE, null, nameNodeVersion);
  for (int i = 0; i < versions.length; i++) {
    File[] storage = UpgradeUtilities.createStorageDirs(
        DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
    log("DataNode version info", DATA_NODE, i, versions[i]);
    UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i]);
    try {
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    } catch (Exception ignore) {
      // Ignore.  The asserts below will check for problems.
      // ignore.printStackTrace();
    }
    assertTrue(cluster.getNameNode() != null);
    assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
                 cluster.isDataNodeUp());
    cluster.shutdownDataNodes();
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 44, Source: TestDFSStartupVersions.java

Example 13: createNameNode

private NameNode createNameNode(int nnIndex, Configuration conf,
    int numDataNodes,
    boolean manageNameDfsDirs,
    boolean format,
    StartupOption operation,
    String nameServiceId) throws IOException {
  // Setup the NameNode configuration
  if (manageNameDfsDirs) {
    if (this.nameNodes[nnIndex] != null) {
      Configuration nnconf = this.nameNodes[nnIndex].conf;
      conf.set("dfs.name.dir", nnconf.get("dfs.name.dir"));
      String editsDir = nnconf.get("dfs.name.edits.dir");
      if (editsDir != null) {
        conf.set("dfs.name.edits.dir", editsDir);
      }
      conf.set("fs.checkpoint.dir", nnconf.get("fs.checkpoint.dir"));
    } else {
      conf.set("dfs.name.dir", new File(base_dir, "name" + (2*nnIndex + 1)).getPath()+","+
               new File(base_dir, "name" + (2*nnIndex + 2)).getPath());
      conf.set("fs.checkpoint.dir", new File(base_dir, "namesecondary" + (2*nnIndex + 1)).
                getPath()+"," + new File(base_dir, "namesecondary" + (2*nnIndex + 2)).getPath());
    }
  }

  
  // Format and clean out DataNode directories
  if (format) {
    Configuration newConf = conf;
    if (federation) {
      newConf = new Configuration(conf);
      NameNode.initializeGenericKeys(newConf, nameServiceId);
    }
    NameNode.format(newConf);
  }
  // Start the NameNode
  String[] args;
  ArrayList<String> argList = new ArrayList<String>();
  if (!(operation == null ||
        operation == StartupOption.FORMAT ||
        operation == StartupOption.REGULAR)) {
    argList.add(operation.getName());
  } 
  if (federation) {
    argList.add(StartupOption.SERVICE.getName());
    argList.add(nameServiceId);
    conf = new Configuration(conf);
  }
  args = new String[argList.size()];
  argList.toArray(args);
  return NameNode.createNameNode(args, conf);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 51, Source: MiniDFSCluster.java

Example 14: testDistributedUpgrade

/**
 */
public void testDistributedUpgrade() throws Exception {
  int numDirs = 1;
  TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage();
  testImg.unpackStorage();
  int numDNs = testImg.numDataNodes;

  // register new upgrade objects (ignore all existing)
  UpgradeObjectCollection.initialize();
  UpgradeObjectCollection.registerUpgrade(new UO_Datanode1());
  UpgradeObjectCollection.registerUpgrade(new UO_Namenode1());
  UpgradeObjectCollection.registerUpgrade(new UO_Datanode2());
  UpgradeObjectCollection.registerUpgrade(new UO_Namenode2());
  UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
  UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());

  conf = new Configuration();
  if (System.getProperty("test.build.data") == null) { // to test to be run outside of ant
    System.setProperty("test.build.data", "build/test/data");
  }
  conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off

  log("NameNode start in regular mode when dustributed upgrade is required", numDirs);
  startNameNodeShouldFail(StartupOption.REGULAR);

  log("Start NameNode only distributed upgrade", numDirs);
  // cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
  cluster = new MiniDFSCluster(0, conf, 0, false, true,
                                StartupOption.UPGRADE, null);
  cluster.shutdown();

  log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
  startNameNodeShouldFail(StartupOption.REGULAR);

  log("NameNode rollback to the old version that require a dustributed upgrade", numDirs);
  startNameNodeShouldFail(StartupOption.ROLLBACK);

  log("Normal distributed upgrade for the cluster", numDirs);
  cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
                                StartupOption.UPGRADE, null);
  DFSAdmin dfsAdmin = new DFSAdmin();
  dfsAdmin.setConf(conf);
  dfsAdmin.run(new String[] {"-safemode", "wait"});
  dfsAdmin.run(new String[] {"-finalizeUpgrade"});
  cluster.shutdown();

  // it should be ok to start in regular mode
  log("NameCluster regular startup after the upgrade", numDirs);
  cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
                                StartupOption.REGULAR, null);
  cluster.waitActive();
  cluster.shutdown();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 54, Source: TestDistributedUpgrade.java

Example 15: MiniDFSCluster

/**
 * NOTE: if possible, the other constructors that don't take a nameNode port
 * parameter should be used, as they ensure that the servers use free ports.
 * <p>
 * Modify the config and start up the servers.  
 * 
 * @param nameNodePort suggestion for which rpc port to use.  caller should
 *          use getNameNodePort() to get the actual port used.
 * @param conf the base configuration to use in starting the servers.  This
 *          will be modified as necessary.
 * @param numDataNodes Number of DataNodes to start; may be zero
 * @param format if true, format the NameNode and DataNodes before starting up
 * @param manageNameDfsDirs if true, the data directories for servers will be
 *          created and dfs.name.dir and dfs.data.dir will be set in the conf
 * @param manageDataDfsDirs if true, the data directories for datanodes will
 *          be created and dfs.data.dir set to same in the conf
 * @param operation the operation with which to start the servers.  If null
 *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks array of strings indicating the rack that each DataNode is on
 * @param hosts array of strings indicating the hostnames of each DataNode
 * @param simulatedCapacities array of capacities of the simulated data nodes
 */
public MiniDFSCluster(int nameNodePort, 
                      Configuration conf,
                      int numDataNodes,
                      boolean format,
                      boolean manageNameDfsDirs,
                      boolean manageDataDfsDirs,
                      StartupOption operation,
                      String[] racks, String hosts[],
                      long[] simulatedCapacities) throws IOException {
  this.conf = conf;
  base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
  data_dir = new File(base_dir, "data");
  
  // Setup the NameNode configuration
  FileSystem.setDefaultUri(conf, "hdfs://localhost:"+ Integer.toString(nameNodePort));
  conf.set("dfs.http.address", "127.0.0.1:0");  
  if (manageNameDfsDirs) {
    conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+
             new File(base_dir, "name2").getPath());
    conf.set("fs.checkpoint.dir", new File(base_dir, "namesecondary1").
              getPath()+"," + new File(base_dir, "namesecondary2").getPath());
  }
  
  int replication = conf.getInt("dfs.replication", 3);
  conf.setInt("dfs.replication", Math.min(replication, numDataNodes));
  int safemodeExtension = conf.getInt("dfs.safemode.extension.testing", 0);
  conf.setInt("dfs.safemode.extension", safemodeExtension);
  conf.setInt("dfs.namenode.decommission.interval", 3); // 3 second

  // Set a small delay on blockReceived in the minicluster to approximate
  // a real cluster a little better and suss out bugs.
  conf.setInt("dfs.datanode.artificialBlockReceivedDelay", 5);
  
  // Format and clean out DataNode directories
  if (format) {
    if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
      throw new IOException("Cannot remove data directory: " + data_dir);
    }
    NameNode.format(conf); 
  }
  
  // Start the NameNode
  String[] args = (operation == null ||
                   operation == StartupOption.FORMAT ||
                   operation == StartupOption.REGULAR) ?
    new String[] {} : new String[] {operation.getName()};
  conf.setClass("topology.node.switch.mapping.impl", 
                 StaticMapping.class, DNSToSwitchMapping.class);
  nameNode = NameNode.createNameNode(args, conf);
  
  // Start the DataNodes
  startDataNodes(conf, numDataNodes, manageDataDfsDirs, 
                  operation, racks, hosts, simulatedCapacities);
  waitClusterUp();
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 77, Source: MiniDFSCluster.java
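
To tie the examples together, here is a minimal sketch of driving this constructor family with StartupOption.REGULAR from a test. It uses only calls that appear in the examples above, plus getFileSystem, which is assumed to be available on this era's MiniDFSCluster:

// Minimal usage sketch: start a one-DataNode cluster in REGULAR mode,
// perform a trivial operation, and shut everything down.
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, StartupOption.REGULAR);
try {
  cluster.waitActive();                     // block until NameNode and DataNode are up
  FileSystem fs = cluster.getFileSystem();
  fs.mkdirs(new Path("/smoke-test"));       // exercise the running cluster
} finally {
  cluster.shutdown();                       // release ports and test directories
}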


Note: The org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.REGULAR field examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.