

Java StartupOption.UPGRADE Code Examples

This article collects typical usage examples of StartupOption.UPGRADE from org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption in Java. If you have been wondering what StartupOption.UPGRADE is for, how to use it, or where to find working examples, the curated samples below should help. You can also explore the other usage examples of org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.


The following 13 code examples demonstrate StartupOption.UPGRADE, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java samples.
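Before the examples, a minimal sketch of the underlying idea, with the caveat that it assumes the Hadoop 1.x-era API used throughout this page: StartupOption is an enum whose constants wrap the command-line flags a NameNode or DataNode accepts at startup, and UPGRADE corresponds to the -upgrade flag.

// Minimal sketch; assumes the Hadoop 1.x-era HdfsConstants.StartupOption enum,
// where each constant carries its command-line flag string.
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;

public class StartupOptionSketch {
  public static void main(String[] args) {
    StartupOption opt = StartupOption.UPGRADE;
    // getName() returns the flag as typed on the command line ("-upgrade" here).
    System.out.println(opt.getName());
  }
}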

Example 1: testUpgradeFromImage

public void testUpgradeFromImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
      System.setProperty("test.build.data", "build/test/data");
    }
    conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off
    cluster = new MiniDFSCluster(0, conf, numDataNodes, false, true,
                                 StartupOption.UPGRADE, null);
    cluster.waitActive();
    DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
    DFSClient dfsClient = dfs.dfs;
    //Safemode will be off only after upgrade is complete. Wait for it.
    while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) {
      LOG.info("Waiting for SafeMode to be OFF.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }

    verifyFileSystem(dfs);
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 26, Source: TestDFSUpgradeFromImage.java

Example 2: parseArguments

private static StartupOption parseArguments(String args[]) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else
      return null;
  }
  return startOpt;
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 22, Source: NameNode.java
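A usage note on the parser above (hypothetical calls, since the method is private to NameNode; imagine invoking it from a test in the same package): the -upgrade flag yields StartupOption.UPGRADE, an empty argument list falls through to the REGULAR default, and any unrecognized argument rejects the whole command line.

// Hypothetical invocations of the parseArguments method shown above:
StartupOption a = parseArguments(new String[] { StartupOption.UPGRADE.getName() });
// a == StartupOption.UPGRADE
StartupOption b = parseArguments(new String[0]);
// b == StartupOption.REGULAR (the default)
StartupOption c = parseArguments(new String[] { "-bogus" });
// c == null; one bad argument invalidates the entire command line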

Example 3: parseArguments

private static StartupOption parseArguments(String args[]) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.BACKUP;
    } else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.CHECKPOINT;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else
      return null;
  }
  return startOpt;
}
 
Developer: cumulusyebl, Project: cumulus, Lines: 26, Source: NameNode.java

Example 4: testNonFederationClusterUpgradeAfterFederationVersion

public void testNonFederationClusterUpgradeAfterFederationVersion()
    throws Exception {
  File[] baseDirs;
  UpgradeUtilities.initialize();
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);      
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    log("DataNode upgrade with federation layout version in current", numDirs);
    UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
    try {
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
          new StorageInfo(FSConstants.FEDERATION_VERSION,
                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                          UpgradeUtilities.getCurrentFsscTime(cluster)), 
          cluster.getNameNode().getNamespaceID());
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkResult(DATA_NODE, dataNodeDirs, 0, false);
    } finally {
      if (cluster != null) cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 29, Source: TestDFSUpgrade.java

Example 5: testFederationClusterUpgradeAfterFederationVersion

public void testFederationClusterUpgradeAfterFederationVersion()
    throws Exception {
  File[] baseDirs;
  Configuration baseConf = new Configuration();
  UpgradeUtilities.initialize(2, baseConf, true);
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    log("DataNode upgrade with federation layout version in current", numDirs);
    UpgradeUtilities.createFederatedNameNodeStorageDirs(nameNodeDirs);
    conf.set(FSConstants.DFS_FEDERATION_NAMESERVICES, 
        baseConf.get(FSConstants.DFS_FEDERATION_NAMESERVICES));
    try {
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE, false, 2);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      for (int i = 0; i < 2; i++) {
        UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
          new StorageInfo(FSConstants.FEDERATION_VERSION,
                          cluster.getNameNode(i).getNamespaceID(),
                          cluster.getNameNode(i).versionRequest().getCTime()),
          cluster.getNameNode(i).getNamespaceID());
      }
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      for (int i = 0 ;i < 2; i++) {
        checkResult(DATA_NODE, dataNodeDirs, i, false);
      }
    } finally {
      if (cluster != null) cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 36, Source: TestDFSUpgrade.java

Example 6: testFederationClusterUpgradeAfterFederationVersionWithCTimeChange

public void testFederationClusterUpgradeAfterFederationVersionWithCTimeChange()
    throws Exception {
  File[] baseDirs;
  Configuration baseConf = new Configuration();
  UpgradeUtilities.initialize(2, baseConf, true);
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    log("DataNode upgrade with federation layout version in current and ctime change",
        numDirs);
    UpgradeUtilities.createFederatedNameNodeStorageDirs(nameNodeDirs);
    conf.set(FSConstants.DFS_FEDERATION_NAMESERVICES,
        baseConf.get(FSConstants.DFS_FEDERATION_NAMESERVICES));
    try {
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE, false, 2);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs,
          "current");
      for (int i = 0; i < 2; i++) {
        UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
            new StorageInfo(FSConstants.FEDERATION_VERSION, cluster
                .getNameNode(i).getNamespaceID(), cluster.getNameNode(i)
                .versionRequest().getCTime() - 1), cluster.getNameNode(i)
                .getNamespaceID());
      }
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);

      for (int i = 0; i < 2; i++) {
        checkResult(DATA_NODE, dataNodeDirs, i, false);
      }
    } finally {
      if (cluster != null)
        cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 40, Source: TestDFSUpgrade.java

Example 7: startNnInUpgrade

/**
 * Start the NN in upgrade mode and verify the upgradeTime.
 * @return the running MiniDFSCluster, still in upgrade mode
 * @throws IOException
 * @throws InterruptedException
 */
private MiniDFSCluster startNnInUpgrade() 
  throws IOException, InterruptedException {
  Configuration conf = new Configuration();

  MiniDFSCluster cluster = new MiniDFSCluster(0, conf, 1, true, true, 
      StartupOption.UPGRADE, null);
  Thread.sleep(1000 * 60);
  FSNamesystem ns = cluster.getNameNode().getNamesystem();
  assertTrue(ns.getUpgradeTime() >= 1);  

  return cluster;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 18, Source: TestNameNodeUpgrade.java
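The fixed one-minute sleep above makes the test slow and potentially flaky. A hedged alternative, assuming the same FSNamesystem.getUpgradeTime() accessor (which in this code base appears to report whole minutes since the upgrade began), would poll with a deadline:

// Sketch of a polling variant; getUpgradeTime() is assumed to report
// whole minutes since the upgrade started, so waiting for >= 1 still
// takes about a minute, but the deadline bounds the worst case.
FSNamesystem ns = cluster.getNameNode().getNamesystem();
long deadline = System.currentTimeMillis() + 2 * 60 * 1000;
while (ns.getUpgradeTime() < 1 && System.currentTimeMillis() < deadline) {
  Thread.sleep(1000);
}
assertTrue(ns.getUpgradeTime() >= 1);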

Example 8: testUpgradeFromImage

@Test
public void testUpgradeFromImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
      System.setProperty("test.build.data", "build/test/data");
    }
    conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off
    cluster = new MiniDFSCluster(0, conf, numDataNodes, false, true,
                                 StartupOption.UPGRADE, null);
    cluster.waitActive();
    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                         cluster.getNameNodePort()), conf);
    //Safemode will be off only after upgrade is complete. Wait for it.
    while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) {
      LOG.info("Waiting for SafeMode to be OFF.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }

    verifyFileSystem(dfsClient);
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 27, Source: TestDFSUpgradeFromImage.java

Example 9: parseArguments

private static StartupOptionAndService parseArguments(String args[]) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  String serviceName = null;
  boolean failOnTxIdMismatch = true;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if (StartupOption.SERVICE.getName().equalsIgnoreCase(cmd)) {
      if (++i < argsLen) {
        serviceName = args[i];
      } else {
        return null;
      }
    } else if (StartupOption.IGNORETXIDMISMATCH.getName().equalsIgnoreCase(cmd)) {
      failOnTxIdMismatch = false;
    } else if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else {
      return null;
    }
  }
  return new StartupOptionAndService(startOpt, serviceName,
      failOnTxIdMismatch);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 34, Source: NameNode.java
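This variant also consumes a service name: -service takes the following argument as the target nameservice, and it can be combined with -upgrade. A hypothetical invocation (the method is private to NameNode, and the accessor names in the comment are illustrative):

// Hypothetical call to the parseArguments variant shown above.
// "-service ns1 -upgrade" selects service "ns1" together with UPGRADE.
StartupOptionAndService parsed = parseArguments(new String[] {
    StartupOption.SERVICE.getName(), "ns1", StartupOption.UPGRADE.getName() });
// parsed would carry StartupOption.UPGRADE and the service name "ns1".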

Example 10: testFederationClusterUpgradeAfterFederationVersionWithTopLevelLayout

public void testFederationClusterUpgradeAfterFederationVersionWithTopLevelLayout()
    throws Exception {
  File[] baseDirs;
  Configuration baseConf = new Configuration();
  UpgradeUtilities.initialize(2, baseConf, true);
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    log("DataNode upgrade with federation layout version in current and no ns level layout version",
        numDirs);
    UpgradeUtilities.createFederatedNameNodeStorageDirs(nameNodeDirs);
    conf.set(FSConstants.DFS_FEDERATION_NAMESERVICES,
        baseConf.get(FSConstants.DFS_FEDERATION_NAMESERVICES));
    try {
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE, false, 2);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs,
          "current");
      for (int i = 0; i < 2; i++) {
        UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
            new StorageInfo(FSConstants.FEDERATION_VERSION, cluster
                .getNameNode(i).getNamespaceID(), cluster.getNameNode(i)
                .versionRequest().getCTime()), cluster.getNameNode(i)
                .getNamespaceID(), false);
      }
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);

      for (int i = 0; i < 2; i++) {
        checkResult(DATA_NODE, dataNodeDirs, i, false);
      }

      // Finalize upgrade.
      for (int i = 0; i < 2; i++) {
        cluster.getNameNode(i).finalizeUpgrade();
      }
      cluster.restartDataNodes();

      // Wait for datanodes to finalize.
      Thread.sleep(10000);

      for (int nnIndex = 0; nnIndex < 2; nnIndex++) {
        for (int i = 0; i < dataNodeDirs.length; i++) {
          File nsBaseDir = NameSpaceSliceStorage.getNsRoot(cluster
              .getNameNode(nnIndex).getNamespaceID(), new File(
              dataNodeDirs[i], "current"));
          assertFalse(new File(nsBaseDir, "previous").exists());
        }
      }
    } finally {
      if (cluster != null)
        cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 58, Source: TestDFSUpgrade.java

Example 11: testDistributedUpgrade

/**
 */
public void testDistributedUpgrade() throws Exception {
  int numDirs = 1;
  TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage();
  testImg.unpackStorage();
  int numDNs = testImg.numDataNodes;

  // register new upgrade objects (ignore all existing)
  UpgradeObjectCollection.initialize();
  UpgradeObjectCollection.registerUpgrade(new UO_Datanode1());
  UpgradeObjectCollection.registerUpgrade(new UO_Namenode1());
  UpgradeObjectCollection.registerUpgrade(new UO_Datanode2());
  UpgradeObjectCollection.registerUpgrade(new UO_Namenode2());
  UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
  UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());

  conf = new Configuration();
  if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
    System.setProperty("test.build.data", "build/test/data");
  }
  conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off

  log("NameNode start in regular mode when dustributed upgrade is required", numDirs);
  startNameNodeShouldFail(StartupOption.REGULAR);

  log("Start NameNode only distributed upgrade", numDirs);
  // cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
  cluster = new MiniDFSCluster(0, conf, 0, false, true,
                                StartupOption.UPGRADE, null);
  cluster.shutdown();

  log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
  startNameNodeShouldFail(StartupOption.REGULAR);

  log("NameNode rollback to the old version that require a dustributed upgrade", numDirs);
  startNameNodeShouldFail(StartupOption.ROLLBACK);

  log("Normal distributed upgrade for the cluster", numDirs);
  cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
                                StartupOption.UPGRADE, null);
  DFSAdmin dfsAdmin = new DFSAdmin();
  dfsAdmin.setConf(conf);
  dfsAdmin.run(new String[] {"-safemode", "wait"});
  dfsAdmin.run(new String[] {"-finalizeUpgrade"});
  cluster.shutdown();

  // it should be ok to start in regular mode
  log("NameCluster regular startup after the upgrade", numDirs);
  cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
                                StartupOption.REGULAR, null);
  cluster.waitActive();
  cluster.shutdown();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 54, Source: TestDistributedUpgrade.java

Example 12: testDistributedUpgrade

/**
 */
public void testDistributedUpgrade() throws Exception {
  int numDirs = 1;
  TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage();
  testImg.unpackStorage();
  int numDNs = testImg.numDataNodes;

  // register new upgrade objects (ignore all existing)
  UpgradeObjectCollection.initialize();
  UpgradeObjectCollection.registerUpgrade(new UO_Datanode1());
  UpgradeObjectCollection.registerUpgrade(new UO_Namenode1());
  UpgradeObjectCollection.registerUpgrade(new UO_Datanode2());
  UpgradeObjectCollection.registerUpgrade(new UO_Namenode2());
  UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
  UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());

  conf = new Configuration();
  if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
    System.setProperty("test.build.data", "build/test/data");
  }
  conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off

  log("NameNode start in regular mode when dustributed upgrade is required", numDirs);
  startNameNodeShouldFail(StartupOption.REGULAR);

  log("Start NameNode only distributed upgrade", numDirs);
  // cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
  cluster = new MiniDFSCluster(0, conf, 0, false, true,
                                StartupOption.UPGRADE, null);
  cluster.shutdown();

  log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
  startNameNodeShouldFail(StartupOption.REGULAR);

  log("NameNode rollback to the old version that require a dustributed upgrade", numDirs);
  startNameNodeShouldFail(StartupOption.ROLLBACK);

  log("Normal distributed upgrade for the cluster", numDirs);
  cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
                                StartupOption.UPGRADE, null);
  DFSAdmin dfsAdmin = new DFSAdmin();
  dfsAdmin.setConf(conf);
  dfsAdmin.run(new String[] {"-safemode", "wait"});
  cluster.shutdown();

  // it should be ok to start in regular mode
  log("NameCluster regular startup after the upgrade", numDirs);
  cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
                                StartupOption.REGULAR, null);
  cluster.waitActive();
  cluster.shutdown();
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 53, Source: TestDistributedUpgrade.java

Example 13: testEarlierVersionEditLog

/**
 * Earlier versions of HDFS didn't persist block allocation to the edit log.
 * This makes sure that we can still load an edit log when the OP_CLOSE
 * is the opcode which adds all of the blocks. This is a regression
 * test for HDFS-2773.
 * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
 * which has a multi-block file. This is similar to the tests in
 * {@link TestDFSUpgradeFromImage} but none of those images include
 * a multi-block file.
 */
@Test
public void testEarlierVersionEditLog() throws Exception {
  final Configuration conf = new Configuration();
      
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
  String testDir = System.getProperty("test.build.data", "build/test/data");
  File dfsDir = new File(testDir, "image-1.0");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  assertFileExists(nameDir);
  File dataDir = new File(dfsDir, "data");
  assertFileExists(dataDir);
  
  conf.set("dfs.name.dir", nameDir.getAbsolutePath());
  conf.set("dfs.data.dir", dataDir.getAbsolutePath());

  conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
  // small safemode extension to make the test run faster.
  conf.set("dfs.safemode.extension", "1");
  MiniDFSCluster cluster = new MiniDFSCluster(0, conf, 1, false, false,
      StartupOption.UPGRADE,
      null);
  cluster.waitActive();

  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/user/todd/4blocks");
    // Read it without caring about the actual data within - we just need
    // to make sure that the block states and locations are OK.
    readFile(fs, testPath);
    
    // Ensure that we can append to it - if the blocks were in some funny
    // state we'd get some kind of issue here. 
    FSDataOutputStream stm = fs.append(testPath);
    try {
      stm.write(1);
    } finally {
      IOUtils.closeStream(stm);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 58, Source: TestPersistBlocks.java


Note: The org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.UPGRADE examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.