

Java StorageDirectory.unlock Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory.unlock. If you are unsure what StorageDirectory.unlock does or how to call it, the selected examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory.


The sections below present 5 code examples of the StorageDirectory.unlock method, ordered by popularity.
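
All five examples follow the same discipline: acquire the directory lock (either directly via lock() or implicitly via analyzeStorage), do the work, and release it with unlock() in a finally or catch block. The minimal sketch below illustrates that pattern in isolation. It is not taken from any project cited here; the storage path is a hypothetical placeholder, and it assumes the directory already exists on disk.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

public class StorageDirectoryUnlockSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical storage root; real code obtains the directory from
    // NNStorage or another Storage implementation.
    StorageDirectory sd = new StorageDirectory(new File("/tmp/name-dir"));
    sd.lock();       // creates the exclusive in_use.lock file in the root
    try {
      // ... read or modify the directory contents while it is locked ...
    } finally {
      sd.unlock();   // always release the lock, even if the work failed
    }
  }
}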

Example 1: assertClusterStartFailsWhenDirLocked

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import required by the method
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Lock the edits dir, then start the NN, and make sure it fails to start
  sdToLock.lock();
  MiniDFSCluster cluster = null;
  try {      
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .manageNameDfsDirs(false).numDataNodes(0).build();
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
    sdToLock.unlock();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: TestCheckpoint.java
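
Note how unlock sits in the finally block: the lock taken by sdToLock.lock() is released even when the cluster fails to start as expected. The assertFalse only passes if file locking is unsupported on the underlying filesystem, the one case in which startup cannot be expected to fail.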

Example 2: recoverCreateRead

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import required by the method
/**
 * Analyze backup storage directories for consistency.<br>
 * Recover from incomplete checkpoints if required.<br>
 * Read VERSION and fstime files if exist.<br>
 * Do not load image or edits.
 *
 * @throws IOException if the node should shutdown.
 */
void recoverCreateRead() throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured storage dirs are inaccessible
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        // for backup node all directories may be unformatted initially
        LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
        LOG.info("Formatting ...");
        sd.clearDirectory(); // create empty current
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if(curState != StorageState.NOT_FORMATTED) {
        // read and verify consistency with other directories
        storage.readProperties(sd);
      }
    } catch(IOException ioe) {
      sd.unlock();
      throw ioe;
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 42, Source: BackupImage.java
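
Here the lock is acquired implicitly: analyzeStorage leaves each directory locked but not opened (see the inline comment), so the catch block must call sd.unlock() before rethrowing, or a failed recovery would leave a stale lock behind.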

Example 3: recoverStorageDirs

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import required by the method
/**
 * For each storage directory, performs recovery of incomplete transitions
 * (eg. upgrade, rollback, checkpoint) and inserts the directory's storage
 * state into the dataDirStates map.
 * @param dataDirStates output of storage directory states
 * @return true if there is at least one valid formatted storage directory
 */
public static boolean recoverStorageDirs(StartupOption startOpt,
    NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates)
    throws IOException {
  boolean isFormatted = false;
  // This loop needs to be over all storage dirs, even shared dirs, to make
  // sure that we properly examine their state, but we make sure we don't
  // mutate the shared dir below in the actual loop.
  for (Iterator<StorageDirectory> it = 
                    storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    if (startOpt == StartupOption.METADATAVERSION) {
      /* All we need is the layout version. */
      storage.readProperties(sd);
      return true;
    }

    try {
      curState = sd.analyzeStorage(startOpt, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // name-node fails if any of the configured storage dirs are missing
        throw new InconsistentFSStateException(sd.getRoot(),
                    "storage directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if (curState != StorageState.NOT_FORMATTED 
          && startOpt != StartupOption.ROLLBACK) {
        // read and verify consistency with other directories
        storage.readProperties(sd, startOpt);
        isFormatted = true;
      }
      if (startOpt == StartupOption.IMPORT && isFormatted)
        // import of a checkpoint is allowed only into empty image directories
        throw new IOException("Cannot import image from a checkpoint. " 
            + " NameNode already contains an image in " + sd.getRoot());
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
    dataDirStates.put(sd,curState);
  }
  return isFormatted;
}
 
Developer ID: naver, Project: hadoop, Lines: 58, Source: FSImage.java
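
The pattern is the same as in Example 2: analyzeStorage locks the directory, and the catch block releases that lock before propagating the error. Directories that pass all checks stay locked and are recorded in the dataDirStates map.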

Example 4: recoverCreate

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import required by the method
/**
 * Analyze checkpoint directories.
 * Create directories if they do not exist.
 * Recover from an unsuccessful checkpoint if necessary.
 *
 * @throws IOException
 */
void recoverCreate(boolean format) throws IOException {
  storage.attemptRestoreRemovedStorage();
  storage.unlockAll();

  for (Iterator<StorageDirectory> it = 
               storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    boolean isAccessible = true;
    try { // create directories if they don't exist yet
      if(!sd.getRoot().mkdirs()) {
        // do nothing, directory is already created
      }
    } catch(SecurityException se) {
      isAccessible = false;
    }
    if(!isAccessible)
      throw new InconsistentFSStateException(sd.getRoot(),
          "cannot access checkpoint directory.");
    
    if (format) {
      // Don't confirm, since this is just the secondary namenode.
      LOG.info("Formatting storage directory " + sd);
      sd.clearDirectory();
    }
    
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured checkpoint dirs are inaccessible 
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;  // it's ok since initially there is no current and VERSION
      case NORMAL:
        // Read the VERSION file. This verifies that:
        // (a) the VERSION file for each of the directories is the same,
        // and (b) when we connect to a NN, we can verify that the remote
        // node matches the same namespace that we ran on previously.
        storage.readProperties(sd);
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 60, Source: SecondaryNameNode.java
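
storage.unlockAll() at the top releases any locks still held from a previous checkpoint attempt before each directory is re-analyzed and re-locked; the per-directory catch block then unlocks only the directory that failed, mirroring the earlier examples.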

Example 5: testSecondaryNameNodeLocking

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import required by the method
/**
 * Test that the SecondaryNameNode properly locks its storage directories.
 */
@Test
public void testSecondaryNameNodeLocking() throws Exception {
  // Start a primary NN so that the secondary will start successfully
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    StorageDirectory savedSd = null;
    // Start a secondary NN, then make sure that all of its storage
    // dirs got locked.
    secondary = startSecondaryNameNode(conf);
    
    NNStorage storage = secondary.getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }
    LOG.info("===> Shutting down first 2NN");
    secondary.shutdown();
    secondary = null;

    LOG.info("===> Locking a dir, starting second 2NN");
    // Lock one of its dirs, make sure it fails to start
    LOG.info("Trying to lock" + savedSd);
    savedSd.lock();
    try {
      secondary = startSecondaryNameNode(conf);
      assertFalse("Should fail to start 2NN when " + savedSd + " is locked",
          savedSd.isLockSupported());
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("already locked", ioe);
    } finally {
      savedSd.unlock();
    }
    
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 47, Source: TestCheckpoint.java
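
The finally block around the second startup attempt guarantees that savedSd.unlock() runs whether the expected "already locked" IOException is thrown or the assertion path is taken, so the test never leaves a stale lock file behind for later tests.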


注:本文中的org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory.unlock方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。