当前位置: 首页>>代码示例>>Java>>正文


Java StartupOption.ROLLBACK属性代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.ROLLBACK属性的典型用法代码示例。如果您正苦于以下问题:Java StartupOption.ROLLBACK属性的具体用法?Java StartupOption.ROLLBACK怎么用?Java StartupOption.ROLLBACK使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption的用法示例。


在下文中一共展示了StartupOption.ROLLBACK属性的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: parseArguments

/**
 * Parses command-line arguments and records the chosen startup option
 * in the supplied configuration.
 *
 * Recognized flags (case-insensitive): -rollback, -regular. The legacy
 * -r/--rack flags are rejected with an error and terminate the process.
 *
 * @param args command-line arguments (may be null)
 * @param conf configuration that receives the resulting startup option
 * @return false if any passed argument is not recognized
 */
private static boolean parseArguments(String args[],
                                      Configuration conf) {
  StartupOption startOpt = StartupOption.REGULAR;
  int count = (args == null) ? 0 : args.length;
  for (int idx = 0; idx < count; idx++) {
    String token = args[idx];
    if ("-r".equalsIgnoreCase(token) || "--rack".equalsIgnoreCase(token)) {
      // Rack resolution moved to the NameNode; these flags are fatal here.
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      System.exit(-1);
    } else if ("-rollback".equalsIgnoreCase(token)) {
      startOpt = StartupOption.ROLLBACK;
    } else if ("-regular".equalsIgnoreCase(token)) {
      startOpt = StartupOption.REGULAR;
    } else {
      return false;
    }
  }
  setStartupOption(conf, startOpt);
  return true;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:25,代码来源:AvatarDataNode.java

示例2: parseArguments

/**
 * Maps NameNode command-line arguments onto a startup option.
 *
 * The last recognized option wins; any unrecognized argument aborts
 * parsing. With no arguments the default is {@code REGULAR}.
 *
 * @param args command-line arguments (may be null)
 * @return the selected startup option, or null on an invalid argument
 */
private static StartupOption parseArguments(String args[]) {
  StartupOption result = StartupOption.REGULAR;
  if (args != null) {
    for (String cmd : args) {
      if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
        result = StartupOption.FORMAT;
      } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
        result = StartupOption.REGULAR;
      } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
        result = StartupOption.UPGRADE;
      } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
        result = StartupOption.ROLLBACK;
      } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
        result = StartupOption.FINALIZE;
      } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
        result = StartupOption.IMPORT;
      } else {
        return null;
      }
    }
  }
  return result;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:22,代码来源:NameNode.java

示例3: parseArguments

/**
 * Parses DataNode command-line arguments and stores the resulting
 * startup option in the given configuration.
 *
 * Accepts -rollback and -regular (case-insensitive); the removed
 * -r/--rack flags log an error and exit the process.
 *
 * @param args command-line arguments (may be null)
 * @param conf configuration updated with the chosen startup option
 * @return false if any passed argument is not recognized
 */
private static boolean parseArguments(String args[], 
                                      Configuration conf) {
  StartupOption selected = StartupOption.REGULAR;
  if (args != null) {
    for (String cmd : args) {
      if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
        // RackID handling now lives in the NameNode; refuse these flags.
        LOG.error("-r, --rack arguments are not supported anymore. RackID " +
            "resolution is handled by the NameNode.");
        System.exit(-1);
      } else if ("-rollback".equalsIgnoreCase(cmd)) {
        selected = StartupOption.ROLLBACK;
      } else if ("-regular".equalsIgnoreCase(cmd)) {
        selected = StartupOption.REGULAR;
      } else {
        return false;
      }
    }
  }
  setStartupOption(conf, selected);
  return true;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:25,代码来源:DataNode.java

示例4: parseArguments

/**
 * Resolves command-line arguments to a startup option by matching each
 * argument against the set of recognized option names.
 *
 * The last matching argument wins; an unknown argument returns null.
 * With no arguments the default is {@code REGULAR}.
 *
 * @param args command-line arguments (may be null)
 * @return the selected startup option, or null on an invalid argument
 */
private static StartupOption parseArguments(String args[]) {
  // Options recognized by this NameNode build, checked in order.
  StartupOption[] candidates = {
      StartupOption.FORMAT, StartupOption.REGULAR, StartupOption.BACKUP,
      StartupOption.CHECKPOINT, StartupOption.UPGRADE,
      StartupOption.ROLLBACK, StartupOption.FINALIZE,
      StartupOption.IMPORT };
  StartupOption result = StartupOption.REGULAR;
  int len = (args == null) ? 0 : args.length;
  for (int pos = 0; pos < len; pos++) {
    StartupOption matched = null;
    for (StartupOption candidate : candidates) {
      if (candidate.getName().equalsIgnoreCase(args[pos])) {
        matched = candidate;
        break;
      }
    }
    if (matched == null) {
      return null;
    }
    result = matched;
  }
  return result;
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:26,代码来源:NameNode.java

示例5: recoverDirectory

/**
 * Inspects and, if needed, recovers a single storage directory, then
 * reports whether it holds a formatted image.
 *
 * A missing/inaccessible directory is fatal. Recoverable intermediate
 * states are repaired via {@code sd.doRecover}. Formatted directories
 * are read for cross-directory consistency unless rolling back.
 *
 * @param sd storage directory to examine (locked but not opened)
 * @param startOpt current startup option
 * @param curState analyzed state of the directory
 * @param checkImport whether to reject a formatted directory on IMPORT
 * @return true if the directory contains a formatted image
 * @throws IOException if the directory is missing or IMPORT would
 *         overwrite an existing image
 */
public static boolean recoverDirectory(StorageDirectory sd,
    StartupOption startOpt, StorageState curState, boolean checkImport)
    throws IOException {
  // sd is locked but not opened
  switch (curState) {
  case NON_EXISTENT:
    // name-node fails if any of the configured storage dirs are missing
    throw new InconsistentFSStateException(sd.getRoot(),
        "storage directory does not exist or is not accessible.");
  case NOT_FORMATTED:
  case NORMAL:
    // nothing to repair
    break;
  default:
    // every remaining state is recoverable
    sd.doRecover(curState);
  }
  boolean isFormatted = false;
  if (startOpt != StartupOption.ROLLBACK
      && curState != StorageState.NOT_FORMATTED) {
    // read and verify consistency with other directories
    sd.read();
    isFormatted = true;
  }
  if (checkImport && isFormatted && startOpt == StartupOption.IMPORT) {
    // import of a checkpoint is allowed only into empty image directories
    throw new IOException("Cannot import image from a checkpoint. "
        + " NameNode already contains an image in " + sd.getRoot());
  }
  return isFormatted;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:29,代码来源:NNStorage.java

示例6: parseArguments

/**
 * Parses NameNode arguments into a startup option plus an optional
 * service name and a transaction-id mismatch policy.
 *
 * {@code -service} consumes the following argument as the service name;
 * a trailing {@code -service} with no value is invalid. Any unknown
 * argument makes the whole parse invalid.
 *
 * @param args command-line arguments (may be null)
 * @return the parsed settings, or null if the arguments are invalid
 */
private static StartupOptionAndService parseArguments(String args[]) {
  StartupOption startOpt = StartupOption.REGULAR;
  String serviceName = null;
  boolean failOnTxIdMismatch = true;
  int len = (args == null) ? 0 : args.length;
  int cursor = 0;
  while (cursor < len) {
    String cmd = args[cursor];
    if (StartupOption.SERVICE.getName().equalsIgnoreCase(cmd)) {
      // -service takes a value from the next argument slot.
      cursor++;
      if (cursor >= len) {
        return null;
      }
      serviceName = args[cursor];
    } else if (StartupOption.IGNORETXIDMISMATCH.getName().equalsIgnoreCase(cmd)) {
      failOnTxIdMismatch = false;
    } else if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else {
      return null;
    }
    cursor++;
  }
  return new StartupOptionAndService(startOpt, serviceName,
      failOnTxIdMismatch);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:34,代码来源:NameNode.java

示例7: parseArguments

/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if passed argements are incorrect
 */
private static boolean parseArguments(String args[],
                                      Configuration conf) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      System.exit(-1);
    } else if ("-rollback".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if ("-regular".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if ("-d".equalsIgnoreCase(cmd)) {
      ++i;
      if(i >= argsLen) {
        LOG.error("-D option requires following argument.");
        System.exit(-1);
      }
      String[] keyval = args[i].split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1]);
      } else {
        LOG.error("-D option invalid (expected =): " + args[i]);
        System.exit(-1);
      }
    } else
      return false;
  }
  setStartupOption(conf, startOpt);
  return true;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:38,代码来源:DataNode.java

示例8: run

@Override
// Starts one AvatarDataNode for a mini-cluster test: prepares per-node
// data directories and configuration, instantiates the datanode, wires
// rack mappings, launches its daemon, and registers it in dataNodes.
// Relies on enclosing-class fields: startOpt, curDn, i, conf, dataDir,
// simulatedCapacities, hosts, racks, dataNodes (assumed set up by the
// cluster harness before this Runnable executes — TODO confirm).
public void run() {
  try {
    // Pass -rollback through to the datanode only when the cluster was
    // started with ROLLBACK; otherwise start it as a regular node.
    String dnArg = StartupOption.REGULAR.getName();
    if (startOpt != null && startOpt == StartupOption.ROLLBACK) {
      dnArg = startOpt.getName();
    }
    String[] dnArgs = { dnArg };
    // iN is this node's global index within the cluster.
    int iN = curDn + i;
    // Clone the cluster config so per-node settings don't leak out.
    Configuration dnConf = new Configuration(conf);

    if (simulatedCapacities != null) {
      // Use the in-memory simulated storage with a fixed capacity
      // instead of real disk-backed storage.
      dnConf.setBoolean("dfs.datanode.simulateddatastorage", true);
      dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
          simulatedCapacities[i]);
    }

    // Each datanode gets two data directories: data(2n+1) and data(2n+2).
    File dir1 = new File(dataDir, "data" + (2 * iN + 1));
    File dir2 = new File(dataDir, "data" + (2 * iN + 2));
    dir1.mkdirs();
    dir2.mkdirs();
    if (!dir1.isDirectory() || !dir2.isDirectory()) {
      throw new IOException(
          "Mkdirs failed to create directory for DataNode " + iN + ": "
          + dir1 + " or " + dir2);
    }
    dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath());

    LOG.info("Starting DataNode " + iN + " with dfs.data.dir: "
        + dnConf.get("dfs.data.dir"));


    if (hosts != null) {
      // Give this node a synthetic hostname for multi-host simulation.
      dnConf.set(FSConstants.SLAVE_HOST_NAME, hosts[i]);
      LOG.info("Starting DataNode " + iN + " with hostname set to: "
          + dnConf.get(FSConstants.SLAVE_HOST_NAME));
    }

    if (racks != null) {
      // NOTE(review): indexes hosts[i] here — assumes hosts is non-null
      // whenever racks is non-null; would NPE otherwise. TODO confirm.
      String name = hosts[i];
      LOG.info("Adding node with hostname : " + name + " to rack "
          + racks[i]);
      StaticMapping.addNodeToRack(name, racks[i]);
    }
    Configuration newconf = new Configuration(dnConf); // save config
    AvatarDataNode dn = instantiateDataNode(dnArgs, dnConf);
    // since the HDFS does things based on IP:port, we need to add the
    // mapping
    // for IP:port to rackId

    String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
    if (racks != null) {
      int port = dn.getSelfAddr().getPort();
      System.out.println("Adding node with IP:port : " + ipAddr + ":"
          + port + " to rack " + racks[i]);
      StaticMapping.addNodeToRack(ipAddr + ":" + port, racks[i]);
    }
    dn.runDatanodeDaemon();
    // dataNodes is shared across concurrently-started nodes; guard the add.
    synchronized (dataNodes) {
      dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs));
    }
  } catch (IOException e) {
    // Best-effort startup in a test harness: log and let the thread end.
    LOG.error("Exception when creating datanode", e);
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:65,代码来源:MiniAvatarCluster.java


注:本文中的org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.ROLLBACK属性示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。