Java StorageDirectory.lock Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory.lock. If you have been wondering how StorageDirectory.lock is used in practice, or are looking for concrete examples of it, the hand-picked code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory.


Four code examples of the StorageDirectory.lock method are shown below, sorted by popularity by default.
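
To put the test methods below in context, the basic calling pattern they all exercise is: acquire the lock on a StorageDirectory before using it exclusively, and release it again in a finally block. The following is a minimal sketch of that pattern, not taken from the Hadoop sources: it assumes an already-initialized NNStorage instance, and the helper name withLockedDirs and its Runnable parameter are invented for illustration. Only the lock(), unlock() and dirIterable() calls that appear in the examples below are used.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;

public class StorageDirectoryLockSketch {

  /**
   * Illustrative helper (not part of Hadoop): lock every storage directory
   * of the given NNStorage, run the guarded work, then release the locks.
   */
  static void withLockedDirs(NNStorage storage, Runnable work) throws IOException {
    for (StorageDirectory sd : storage.dirIterable(null)) {
      // lock() throws IOException if another process already holds the lock
      // (see Examples 2 and 3 below); on platforms where file locking is not
      // supported it simply succeeds, which the tests detect via isLockSupported().
      sd.lock();
    }
    try {
      work.run();
    } finally {
      // Always release the locks, mirroring the finally blocks in the tests below.
      for (StorageDirectory sd : storage.dirIterable(null)) {
        sd.unlock();
      }
    }
  }
}

As in Examples 1 and 4 below, the essential points are that lock() is called before the directory is used exclusively and unlock() runs in a finally block, so the lock is released even if the guarded work fails.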

Example 1: assertClusterStartFailsWhenDirLocked

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class the method depends on
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Lock the edits dir, then start the NN, and make sure it fails to start
  sdToLock.lock();
  MiniDFSCluster cluster = null;
  try {      
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .manageNameDfsDirs(false).numDataNodes(0).build();
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
    sdToLock.unlock();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source file: TestCheckpoint.java

Example 2: testStorageAlreadyLockedErrorMessage

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class the method depends on
/**
 * Test that an attempt to lock a storage directory that is already locked
 * logs an error message that includes the JVM name of the namenode that
 * locked it.
 */
@Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  StorageDirectory savedSd = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }
    
    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(Storage.class));
    try {
      // try to lock the storage that's already locked
      savedSd.lock();
      fail("Namenode should not be able to lock a storage" +
          " that is already locked");
    } catch (IOException ioe) {
      // cannot read lock file on Windows, so message cannot get JVM name
      String lockingJvmName = Path.WINDOWS ? "" :
        " " + ManagementFactory.getRuntimeMXBean().getName();
      String expectedLogMessage = "It appears that another node "
        + lockingJvmName + " has already locked the storage directory";
      assertTrue("Log output does not contain expected log message: "
        + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source file: TestCheckpoint.java

Example 3: assertLockFails

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class the method depends on
/**
 * Assert that the given storage directory can't be locked, because
 * it's already locked.
 */
private static void assertLockFails(StorageDirectory sd) {
  try {
    sd.lock();
    // If the above line didn't throw an exception, then
    // locking must not be supported
    assertFalse(sd.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source file: TestCheckpoint.java

Example 4: testSecondaryNameNodeLocking

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class the method depends on
/**
 * Test that the SecondaryNameNode properly locks its storage directories.
 */
@Test
public void testSecondaryNameNodeLocking() throws Exception {
  // Start a primary NN so that the secondary will start successfully
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    StorageDirectory savedSd = null;
    // Start a secondary NN, then make sure that all of its storage
    // dirs got locked.
    secondary = startSecondaryNameNode(conf);
    
    NNStorage storage = secondary.getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }
    LOG.info("===> Shutting down first 2NN");
    secondary.shutdown();
    secondary = null;

    LOG.info("===> Locking a dir, starting second 2NN");
    // Lock one of its dirs, make sure it fails to start
    LOG.info("Trying to lock" + savedSd);
    savedSd.lock();
    try {
      secondary = startSecondaryNameNode(conf);
      assertFalse("Should fail to start 2NN when " + savedSd + " is locked",
          savedSd.isLockSupported());
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("already locked", ioe);
    } finally {
      savedSd.unlock();
    }
    
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 47, Source file: TestCheckpoint.java


Note: The org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory.lock method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.