Java FileUtil.setWritable Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileUtil.setWritable, drawn from open-source projects. If you are wondering what FileUtil.setWritable does and how to use it in practice, the curated code examples below should help. You can also explore other usage examples of the containing class, org.apache.hadoop.fs.FileUtil.


The following presents 8 code examples of the FileUtil.setWritable method, sorted by popularity by default.
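Before the examples, here is a minimal sketch (not part of the original collection) of the pattern most of these tests share: revoke write permission on a directory with FileUtil.setWritable(dir, false), verify that the operation under test fails, and restore the permission in a finally block. Hadoop tests use this wrapper rather than java.io.File#setWritable in part so that the permission change also behaves consistently on Windows. The class name SetWritableSketch and the scratch-directory path are illustrative assumptions.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class SetWritableSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical scratch directory for the demonstration.
    File dir = new File(System.getProperty("java.io.tmpdir"), "setwritable-demo");
    if (!dir.mkdirs() && !dir.isDirectory()) {
      throw new IOException("could not create " + dir);
    }
    try {
      // Revoke write permission on the directory.
      FileUtil.setWritable(dir, false);
      // Creating an entry inside it should now fail on POSIX systems
      // (as Example 7 notes, Windows may still allow it).
      File child = new File(dir, "child");
      if (child.mkdir()) {
        System.out.println("mkdir unexpectedly succeeded (likely Windows)");
      } else {
        System.out.println("mkdir failed as expected in a read-only directory");
      }
    } finally {
      // Restore write permission before cleanup: entries inside a
      // non-writable directory cannot be removed, so fullyDelete would fail.
      FileUtil.setWritable(dir, true);
      FileUtil.fullyDelete(dir);
    }
  }
}

Note how every example below follows the same finally-block discipline; leaving a test directory non-writable would break both cleanup and subsequent test runs.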

Example 1: testDoPreUpgradeIOError

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Tests that internal renames are done using native code on platforms that
 * have it.  The native rename includes more detailed information about the
 * failure, which can be useful for troubleshooting.
 */
@Test
public void testDoPreUpgradeIOError() throws IOException {
  File storageDir = new File(TestEditLog.TEST_DIR, "preupgradeioerror");
  List<URI> editUris = Collections.singletonList(storageDir.toURI());
  NNStorage storage = setupEdits(editUris, 5);
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  assertNotNull(sd);
  // Make the storage directory read-only so that renaming current to previous.tmp fails.
  FileUtil.setWritable(storageDir, false);
  FileJournalManager jm = null;
  try {
    jm = new FileJournalManager(conf, sd, storage);
    exception.expect(IOException.class);
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      exception.expectMessage("failure in native rename");
    }
    jm.doPreUpgrade();
  } finally {
    IOUtils.cleanup(LOG, jm);
    // Restore permissions on storage directory and make sure we can delete.
    FileUtil.setWritable(storageDir, true);
    FileUtil.fullyDelete(storageDir);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 30, Source: TestFileJournalManager.java

Example 2: testFailedOpen

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Test
public void testFailedOpen() throws Exception {
  File logDir = new File(TEST_DIR, "testFailedOpen");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  try {
    FileUtil.setWritable(logDir, false);
    log.openForWrite();
    fail("Did no throw exception on only having a bad dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "too few journals successfully started", ioe);
  } finally {
    FileUtil.setWritable(logDir, true);
    log.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestEditLog.java

Example 3: testFailToRename

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Test
public void testFailToRename() throws IOException {
  assumeTrue(Shell.WINDOWS);
  OutputStream fos = null;
  try {
    fos = new AtomicFileOutputStream(DST_FILE);
    fos.write(TEST_STRING.getBytes());
    FileUtil.setWritable(TEST_DIR, false);
    exception.expect(IOException.class);
    exception.expectMessage("failure in native rename");
    try {
      fos.close();
    } finally {
      fos = null;
    }
  } finally {
    IOUtils.cleanup(null, fos);
    FileUtil.setWritable(TEST_DIR, true);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: TestAtomicFileOutputStream.java

Example 4: TemporarySocketDirectory

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
public TemporarySocketDirectory() {
  String tmp = System.getProperty("java.io.tmpdir", "/tmp");
  dir = new File(tmp, "socks." + (System.currentTimeMillis() +
      "." + (new Random().nextInt())));
  dir.mkdirs();
  FileUtil.setWritable(dir, true);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 8, Source: TemporarySocketDirectory.java

Example 5: testNameDirError

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Test
public void testNameDirError() throws IOException {
  LOG.info("Starting testNameDirError");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
      .build();
  
  Collection<URI> nameDirs = cluster.getNameDirs(0);
  cluster.shutdown();
  cluster = null;
  
  for (URI nameDirUri : nameDirs) {
    File dir = new File(nameDirUri.getPath());
    
    try {
      // Simulate the mount going read-only
      FileUtil.setWritable(dir, false);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
          .format(false).build();
      fail("NN should have failed to start with " + dir + " set unreadable");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "storage directory does not exist or is not accessible", ioe);
    } finally {
      cleanup(cluster);
      cluster = null;
      FileUtil.setWritable(dir, true);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 31, Source: TestCheckpoint.java

Example 6: tearDown

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@After
public void tearDown() throws Exception {
  if(data_fail != null) {
    FileUtil.setWritable(data_fail, true);
  }
  if(failedDir != null) {
    FileUtil.setWritable(failedDir, true);
  }
  if(cluster != null) {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: TestDataNodeVolumeFailure.java

Example 7: testShutdown

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Test to check that a DN goes down when all its volumes have failed.
 */
@Test
public void testShutdown() throws Exception {
  if (System.getProperty("os.name").startsWith("Windows")) {
    /**
     * This test depends on OS not allowing file creations on a directory
     * that does not have write permissions for the user. Apparently it is 
     * not the case on Windows (at least under Cygwin), and possibly AIX.
     * This is disabled on Windows.
     */
    return;
  }
  // Bring up two more datanodes
  cluster.startDataNodes(conf, 2, true, null, null);
  cluster.waitActive();
  final int dnIndex = 0;
  String bpid = cluster.getNamesystem().getBlockPoolId();
  File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
  File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  storageDir = cluster.getInstanceStorageDir(dnIndex, 1);
  File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  try {
    // make the data directory of the first datanode to be readonly
    assertTrue("Couldn't chmod local vol", dir1.setReadOnly());
    assertTrue("Couldn't chmod local vol", dir2.setReadOnly());

    // create files and make sure that first datanode will be down
    DataNode dn = cluster.getDataNodes().get(dnIndex);
    for (int i=0; dn.isDatanodeUp(); i++) {
      Path fileName = new Path("/test.txt"+i);
      DFSTestUtil.createFile(fs, fileName, 1024, (short)2, 1L);
      DFSTestUtil.waitReplication(fs, fileName, (short)2);
      fs.delete(fileName, true);
    }
  } finally {
    // restore its old permission
    FileUtil.setWritable(dir1, true);
    FileUtil.setWritable(dir2, true);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 43, Source: TestDiskError.java

Example 8: cleanUp

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@After
public void cleanUp() throws IOException {
  FileUtil.setWritable(base, true);
  FileUtil.fullyDelete(base);
  assertTrue(!base.exists());
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 7, Source: TestCryptoStreamsForLocalFS.java


Note: The org.apache.hadoop.fs.FileUtil.setWritable method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult the corresponding project's License before distributing or using the code. Do not reproduce without permission.