

Java FileUtil.setWritable Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileUtil.setWritable. If you are wondering what FileUtil.setWritable does, or how to call it in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileUtil.


Eight code examples of the FileUtil.setWritable method are shown below, ordered by popularity.
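Before the examples, here is a minimal standalone sketch of the method's typical pattern: revoke write permission on a directory to simulate a failure, then restore it so the directory can be cleaned up. It assumes the Hadoop 2.x signature boolean setWritable(File f, boolean writable), which delegates to java.io.File#setWritable on POSIX systems and to a winutils-based chmod on Windows; the demo class name and scratch-directory path below are illustrative only, not part of the Hadoop API.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class SetWritableSketch {
  public static void main(String[] args) throws IOException {
    // Illustrative scratch directory under java.io.tmpdir.
    File dir = new File(System.getProperty("java.io.tmpdir"), "setwritable-sketch");
    if (!dir.mkdirs() && !dir.isDirectory()) {
      throw new IOException("could not create " + dir);
    }
    try {
      // Revoke the owner's write permission; returns false if the change failed.
      boolean ok = FileUtil.setWritable(dir, false);
      System.out.println("revoked: " + ok + ", canWrite: " + dir.canWrite());
    } finally {
      // Always restore write permission, or the directory cannot be deleted.
      FileUtil.setWritable(dir, true);
      FileUtil.fullyDelete(dir);
    }
  }
}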

Example 1: testDoPreUpgradeIOError

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Tests that internal renames are done using native code on platforms that
 * have it.  The native rename includes more detailed information about the
 * failure, which can be useful for troubleshooting.
 */
@Test
public void testDoPreUpgradeIOError() throws IOException {
  File storageDir = new File(TestEditLog.TEST_DIR, "preupgradeioerror");
  List<URI> editUris = Collections.singletonList(storageDir.toURI());
  NNStorage storage = setupEdits(editUris, 5);
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  assertNotNull(sd);
  // Change storage directory so that renaming current to previous.tmp fails.
  FileUtil.setWritable(storageDir, false);
  FileJournalManager jm = null;
  try {
    jm = new FileJournalManager(conf, sd, storage);
    exception.expect(IOException.class);
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      exception.expectMessage("failure in native rename");
    }
    jm.doPreUpgrade();
  } finally {
    IOUtils.cleanup(LOG, jm);
    // Restore permissions on storage directory and make sure we can delete.
    FileUtil.setWritable(storageDir, true);
    FileUtil.fullyDelete(storageDir);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 30, Source: TestFileJournalManager.java

Example 2: testFailedOpen

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Test
public void testFailedOpen() throws Exception {
  File logDir = new File(TEST_DIR, "testFailedOpen");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  try {
    FileUtil.setWritable(logDir, false);
    log.openForWrite();
    fail("Did no throw exception on only having a bad dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "too few journals successfully started", ioe);
  } finally {
    FileUtil.setWritable(logDir, true);
    log.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestEditLog.java

Example 3: testFailToRename

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Test
public void testFailToRename() throws IOException {
  assumeTrue(Shell.WINDOWS);
  OutputStream fos = null;
  try {
    fos = new AtomicFileOutputStream(DST_FILE);
    fos.write(TEST_STRING.getBytes());
    FileUtil.setWritable(TEST_DIR, false);
    exception.expect(IOException.class);
    exception.expectMessage("failure in native rename");
    try {
      fos.close();
    } finally {
      fos = null;
    }
  } finally {
    IOUtils.cleanup(null, fos);
    FileUtil.setWritable(TEST_DIR, true);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: TestAtomicFileOutputStream.java

Example 4: TemporarySocketDirectory

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
public TemporarySocketDirectory() {
  String tmp = System.getProperty("java.io.tmpdir", "/tmp");
  dir = new File(tmp, "socks." + (System.currentTimeMillis() +
      "." + (new Random().nextInt())));
  dir.mkdirs();
  FileUtil.setWritable(dir, true);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 8, Source: TemporarySocketDirectory.java

Example 5: testNameDirError

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Test
public void testNameDirError() throws IOException {
  LOG.info("Starting testNameDirError");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
      .build();
  
  Collection<URI> nameDirs = cluster.getNameDirs(0);
  cluster.shutdown();
  cluster = null;
  
  for (URI nameDirUri : nameDirs) {
    File dir = new File(nameDirUri.getPath());
    
    try {
      // Simulate the mount going read-only
      FileUtil.setWritable(dir, false);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
          .format(false).build();
      fail("NN should have failed to start with " + dir + " set unreadable");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "storage directory does not exist or is not accessible", ioe);
    } finally {
      cleanup(cluster);
      cluster = null;
      FileUtil.setWritable(dir, true);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 31, Source: TestCheckpoint.java

Example 6: tearDown

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@After
public void tearDown() throws Exception {
  if (data_fail != null) {
    FileUtil.setWritable(data_fail, true);
  }
  if (failedDir != null) {
    FileUtil.setWritable(failedDir, true);
  }
  if (cluster != null) {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: TestDataNodeVolumeFailure.java

Example 7: testShutdown

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Test to check that a DN goes down when all its volumes have failed.
 */
@Test
public void testShutdown() throws Exception {
  if (System.getProperty("os.name").startsWith("Windows")) {
    /**
     * This test depends on OS not allowing file creations on a directory
     * that does not have write permissions for the user. Apparently it is 
     * not the case on Windows (at least under Cygwin), and possibly AIX.
     * This is disabled on Windows.
     */
    return;
  }
  // Bring up two more datanodes
  cluster.startDataNodes(conf, 2, true, null, null);
  cluster.waitActive();
  final int dnIndex = 0;
  String bpid = cluster.getNamesystem().getBlockPoolId();
  File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
  File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  storageDir = cluster.getInstanceStorageDir(dnIndex, 1);
  File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  try {
    // make the data directory of the first datanode to be readonly
    assertTrue("Couldn't chmod local vol", dir1.setReadOnly());
    assertTrue("Couldn't chmod local vol", dir2.setReadOnly());

    // create files and make sure that first datanode will be down
    DataNode dn = cluster.getDataNodes().get(dnIndex);
    for (int i=0; dn.isDatanodeUp(); i++) {
      Path fileName = new Path("/test.txt"+i);
      DFSTestUtil.createFile(fs, fileName, 1024, (short)2, 1L);
      DFSTestUtil.waitReplication(fs, fileName, (short)2);
      fs.delete(fileName, true);
    }
  } finally {
    // restore its old permission
    FileUtil.setWritable(dir1, true);
    FileUtil.setWritable(dir2, true);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 43, Source: TestDiskError.java

Example 8: cleanUp

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@After
public void cleanUp() throws IOException {
  FileUtil.setWritable(base, true);
  FileUtil.fullyDelete(base);
  assertTrue(!base.exists());
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 7, Source: TestCryptoStreamsForLocalFS.java


Note: The org.apache.hadoop.fs.FileUtil.setWritable examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not reproduce without permission.