

Java StartupOption Class Code Examples

This article collects and organizes typical usage examples of the Java class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption. If you are wrestling with questions such as what the StartupOption class does, how to use it, or what real code that uses it looks like, the curated class examples below should help.


The StartupOption class belongs to the org.apache.hadoop.hdfs.server.common.HdfsServerConstants package. Fifteen code examples of the class are presented below, sorted by popularity by default.
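Before diving in, here is a minimal, self-contained sketch of the enum's two most common entry points, getName() and getEnum(). This sketch is ours, not taken from the examples below; it assumes a Hadoop 2.x-era hadoop-hdfs artifact on the classpath, and the demo class name is our own.

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

public class StartupOptionDemo {
  public static void main(String[] args) {
    // Each constant carries the command-line flag it represents.
    System.out.println(StartupOption.FORMAT.getName());   // "-format"
    System.out.println(StartupOption.UPGRADE.getName());  // "-upgrade"

    // getEnum() resolves a constant from its enum name; rolling-upgrade
    // variants such as "ROLLINGUPGRADE(ROLLBACK)" get special parsing
    // (see Example 7 below).
    StartupOption opt = StartupOption.getEnum("ROLLBACK");
    System.out.println(opt == StartupOption.ROLLBACK);    // true
  }
}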

Example 1: readProperties

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
void readProperties(StorageDirectory sd, StartupOption startupOption)
    throws IOException {
  Properties props = readPropertiesFile(sd.getVersionFile());
  if (HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK.matches
      (startupOption)) {
    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
    if (lv > getServiceLayoutVersion()) {
      // we should not use a newer version for rollingUpgrade rollback
      throw new IncorrectVersionException(getServiceLayoutVersion(), lv,
          "storage directory " + sd.getRoot().getAbsolutePath());
    }
    props.setProperty("layoutVersion",
        Integer.toString(HdfsConstants.NAMENODE_LAYOUT_VERSION));
  }
  setFieldsFromProperties(props, sd);
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: NNStorage.java

Example 2: startBackupNode

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  
  // Start BackupNode
  String[] args = new String [] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
 
Developer ID: naver, Project: hadoop, Lines: 29, Source: TestHDFSServerPorts.java

Example 3: parseArguments

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
@VisibleForTesting
static boolean parseArguments(String args[], Configuration conf) {
  StartupOption startOpt = StartupOption.REGULAR;
  int i = 0;

  if (args != null && args.length != 0) {
    String cmd = args[i++];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      return false;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else {
      return false;
    }
  }

  setStartupOption(conf, startOpt);
  return (args == null || i == args.length);    // Fail if more than one cmd specified!
}
 
Developer ID: naver, Project: hadoop, Lines: 29, Source: DataNode.java
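To make the parsing rules concrete, here is a short, hypothetical JUnit-style check of the method above. It assumes test code living in the same package as DataNode, since parseArguments() is package-private; the rack name is illustrative.

Configuration conf = new Configuration();
// A recognized option is accepted and recorded in the configuration.
assertTrue(DataNode.parseArguments(new String[] {"-rollback"}, conf));
// No arguments defaults to StartupOption.REGULAR.
assertTrue(DataNode.parseArguments(new String[] {}, conf));
// The removed -r/--rack flag is rejected.
assertFalse(DataNode.parseArguments(new String[] {"-r", "rack1"}, conf));
// Trailing arguments after the startup option are also rejected.
assertFalse(DataNode.parseArguments(new String[] {"-rollback", "extra"}, conf));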

Example 4: loadBpStorageDirectories

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
/**
 * Analyze and load storage directories. Recover from previous transitions if
 * required.
 *
 * The block pool storages are either all analyzed or none of them is loaded.
 * Therefore, a failure to load any block pool storage results in a faulty
 * data volume.
 *
 * @param datanode the Datanode to which this storage belongs
 * @param nsInfo namespace information
 * @param dataDirs storage directories of block pool
 * @param startOpt startup option
 * @return an array of loaded block pool directories.
 * @throws IOException on error
 */
List<StorageDirectory> loadBpStorageDirectories(
    DataNode datanode, NamespaceInfo nsInfo,
    Collection<File> dataDirs, StartupOption startOpt) throws IOException {
  List<StorageDirectory> succeedDirs = Lists.newArrayList();
  try {
    for (File dataDir : dataDirs) {
      if (containsStorageDir(dataDir)) {
        throw new IOException(
            "BlockPoolSliceStorage.recoverTransitionRead: " +
                "attempt to load an used block storage: " + dataDir);
      }
      StorageDirectory sd =
          loadStorageDirectory(datanode, nsInfo, dataDir, startOpt);
      succeedDirs.add(sd);
    }
  } catch (IOException e) {
    LOG.warn("Failed to analyze storage directories for block pool "
        + nsInfo.getBlockPoolID(), e);
    throw e;
  }
  return succeedDirs;
}
 
Developer ID: naver, Project: hadoop, Lines: 38, Source: BlockPoolSliceStorage.java

Example 5: Journal

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
Journal(Configuration conf, File logDir, String journalId,
    StartupOption startOpt, StorageErrorReporter errorReporter)
    throws IOException {
  storage = new JNStorage(conf, logDir, startOpt, errorReporter);
  this.journalId = journalId;

  refreshCachedData();
  
  this.fjm = storage.getJournalManager();
  
  this.metrics = JournalMetrics.create(this);
  
  EditLogFile latest = scanStorageForLatestEdits();
  if (latest != null) {
    highestWrittenTxId = latest.getLastTxId();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: Journal.java

Example 6: testUpgrade4

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
@Ignore
public void testUpgrade4() throws Exception {
  int numDirs = 4;
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);      
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
  conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);

  log("NameNode upgrade with one bad storage dir", numDirs);
  UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
  try {
    // assert("storage dir has been prepared for failure before reaching this point");
    startNameNodeShouldFail(StartupOption.UPGRADE, IOException.class,
        Pattern.compile("failed in 1 storage"));
  } finally {
    // assert("storage dir shall be returned to normal state before exiting");
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: TestDFSUpgrade.java

Example 7: testRollingUpgradeStartupOptionParsing

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
/**
 * Test that we can parse a StartupOption string with a
 * RollingUpgradeStartupOption.
 */
@Test
public void testRollingUpgradeStartupOptionParsing() {
  verifyStartupOptionResult("ROLLINGUPGRADE(ROLLBACK)",
                            StartupOption.ROLLINGUPGRADE,
                            RollingUpgradeStartupOption.ROLLBACK);
  verifyStartupOptionResult("ROLLINGUPGRADE(DOWNGRADE)",
                            StartupOption.ROLLINGUPGRADE,
                            RollingUpgradeStartupOption.DOWNGRADE);
  verifyStartupOptionResult("ROLLINGUPGRADE(STARTED)",
      StartupOption.ROLLINGUPGRADE,
      RollingUpgradeStartupOption.STARTED);

  try {
    verifyStartupOptionResult("ROLLINGUPGRADE(UNKNOWNOPTION)", StartupOption.ROLLINGUPGRADE, null);
    fail("Failed to get expected IllegalArgumentException");
  } catch(IllegalArgumentException iae) {
    // Expected!
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: TestHdfsServerConstants.java
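The verifyStartupOptionResult() helper is not part of the snippet. A plausible reconstruction, based only on the public StartupOption API, might look like this; treat it as a sketch, not necessarily the verbatim upstream helper.

private static void verifyStartupOptionResult(String value,
    StartupOption expectedOption,
    RollingUpgradeStartupOption expectedRollupOption) {
  // getEnum() parses both plain names and the "NAME(SUBOPTION)" form.
  StartupOption option = StartupOption.getEnum(value);
  assertEquals(expectedOption, option);
  if (expectedRollupOption != null) {
    // The parsed sub-option is stored on the enum constant itself.
    assertEquals(expectedRollupOption,
        option.getRollingUpgradeStartupOption());
  }
}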

Example 8: restartNameNode

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
/**
 * Restart the namenode at a given index. Optionally wait for the cluster
 * to become active.
 */
public synchronized void restartNameNode(int nnIndex, boolean waitActive,
    String... args) throws IOException {
  String nameserviceId = nameNodes[nnIndex].nameserviceId;
  String nnId = nameNodes[nnIndex].nnId;
  StartupOption startOpt = nameNodes[nnIndex].startOpt;
  Configuration conf = nameNodes[nnIndex].conf;
  shutdownNameNode(nnIndex);
  if (args.length != 0) {
    startOpt = null;
  } else {
    args = createArgs(startOpt);
  }
  NameNode nn = NameNode.createNameNode(args, conf);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, startOpt,
      conf);
  if (waitActive) {
    waitClusterUp();
    LOG.info("Restarted the namenode");
    waitActive();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: MiniDFSCluster.java
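For context, a typical call site might look like the following sketch; the node index and arguments are illustrative.

// Restart namenode 0 with its previously recorded startup option and
// block until the cluster is active again.
cluster.restartNameNode(0, true);

// Restart with explicit arguments; as the method above shows, passing
// args clears the recorded startup option.
cluster.restartNameNode(0, true, "-rollingUpgrade", "started");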

Example 9: startBackupNode

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
BackupNode startBackupNode(Configuration conf,
                           StartupOption startupOpt,
                           int idx) throws IOException {
  Configuration c = new HdfsConfiguration(conf);
  String dirs = getBackupNodeDir(startupOpt, idx);
  c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
  c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
      "127.0.0.1:0");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
          "127.0.0.1:0");

  BackupNode bn = (BackupNode)NameNode.createNameNode(
      new String[]{startupOpt.getName()}, c);
  assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
  assertTrue(bn.getRole() + " must be in StandbyState",
             bn.getNamesystem().getHAState()
               .equalsIgnoreCase(HAServiceState.STANDBY.name()));
  return bn;
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: TestBackupNode.java

Example 10: testFormatClusterIdOption

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
@Test
public void testFormatClusterIdOption() throws IOException {
  
  // 1. should format without cluster id
  //StartupOption.FORMAT.setClusterId("");
  NameNode.format(config);
  // see if cluster id not empty.
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")) );

  // 2. successful format with given clusterid
  StartupOption.FORMAT.setClusterId("mycluster");
  NameNode.format(config);
  // see if cluster id matches with given clusterid.
  cid = getClusterId(config);
  assertTrue("ClusterId didn't match", cid.equals("mycluster"));

  // 3. format without any clusterid again. It should generate new
  //clusterid.
  StartupOption.FORMAT.setClusterId("");
  NameNode.format(config);
  String newCid = getClusterId(config);
  assertFalse("ClusterId should not be the same", newCid.equals(cid));
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: TestClusterId.java

Example 11: testLoadLogsFromBuggyEarlierVersions

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 43, Source: TestFileAppendRestart.java

Example 12: processStartupOptionsForUpgrade

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
/** 
 * Processes the startup options for the clusterid and blockpoolid 
 * for the upgrade. 
 * @param startOpt Startup options 
 * @param layoutVersion Layout version for the upgrade 
 * @throws IOException
 */
void processStartupOptionsForUpgrade(StartupOption startOpt, int layoutVersion)
    throws IOException {
  if (startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY) {
    // If upgrade from a release that does not support federation,
    // if clusterId is provided in the startupOptions use it.
    // Else generate a new cluster ID      
    if (!NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.FEDERATION, layoutVersion)) {
      if (startOpt.getClusterId() == null) {
        startOpt.setClusterId(newClusterID());
      }
      setClusterID(startOpt.getClusterId());
      setBlockPoolID(newBlockPoolID());
    } else {
      // Upgrade from one version of federation to another supported
      // version of federation doesn't require clusterID.
      // Warn the user if the current clusterid didn't match with the input
      // clusterid.
      if (startOpt.getClusterId() != null
          && !startOpt.getClusterId().equals(getClusterID())) {
        LOG.warn("Clusterid mismatch - current clusterid: " + getClusterID()
            + ", Ignoring given clusterid: " + startOpt.getClusterId());
      }
    }
    LOG.info("Using clusterid: " + getClusterID());
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source: NNStorage.java
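The cluster ID consulted above travels on the StartupOption constant itself. A minimal sketch of how a caller might populate it before an upgrade, mirroring what the NameNode CLI parser does for "hdfs namenode -upgrade -clusterid CID-1234"; the nnStorage variable is an assumed NNStorage instance, and same-package access is assumed since the method is package-private.

StartupOption startOpt = StartupOption.UPGRADE;
startOpt.setClusterId("CID-1234");  // equivalent of the -clusterid flag

// During storage recovery the method above either adopts this ID
// (pre-federation layouts) or warns when it mismatches the stored one.
nnStorage.processStartupOptionsForUpgrade(startOpt,
    HdfsConstants.NAMENODE_LAYOUT_VERSION);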

Example 13: upgradeAndVerify

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
void upgradeAndVerify(MiniDFSCluster.Builder bld, ClusterVerifier verifier)
    throws IOException {
  MiniDFSCluster cluster = null;
  try {
    bld.format(false).startupOption(StartupOption.UPGRADE)
      .clusterId("testClusterId");
    cluster = bld.build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    DFSClient dfsClient = dfs.dfs;
    //Safemode will be off only after upgrade is complete. Wait for it.
    while ( dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET) ) {
      LOG.info("Waiting for SafeMode to be OFF.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }
    recoverAllLeases(dfsClient, new Path("/"));
    verifyFileSystem(dfs);

    if (verifier != null) {
      verifier.verifyClusterPostUpgrade(cluster);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  } 
}
 
Developer ID: naver, Project: hadoop, Lines: 28, Source: TestDFSUpgradeFromImage.java
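A caller might drive this helper roughly as follows; the builder settings and the verified path are illustrative, and ClusterVerifier is the callback interface referenced in the signature above.

// Hypothetical invocation: upgrade a one-datanode cluster from an old
// image and spot-check a known file afterwards.
MiniDFSCluster.Builder bld =
    new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1);
upgradeAndVerify(bld, new ClusterVerifier() {
  @Override
  public void verifyClusterPostUpgrade(MiniDFSCluster cluster)
      throws IOException {
    assertTrue(cluster.getFileSystem().exists(new Path("/expected/file")));
  }
});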

Example 14: createHAState

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
protected HAState createHAState(StartupOption startOpt) {
  if (!haEnabled || startOpt == StartupOption.UPGRADE 
      || startOpt == StartupOption.UPGRADEONLY) {
    return ACTIVE_STATE;
  } else {
    return STANDBY_STATE;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: NameNode.java

Example 15: testZeroBlockSize

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; // import the required package/class
/**
 * In this test case, an image was created with a file whose
 * preferredBlockSize = 0. We try to read this image, since a file with
 * preferredBlockSize = 0 was allowed prior to release 2.1.0-beta, but a
 * NameNode from release 2.6 onwards can no longer read such a file.
 * See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 42, Source: TestFSImage.java


Note: The org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.