

Java NameNodeFile Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile. If you are wondering what the NameNodeFile class is for, or how and where to use it, the curated class code examples below may help.


The NameNodeFile class belongs to the org.apache.hadoop.hdfs.server.namenode.FSImage package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
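
NameNodeFile is an enum nested in FSImage: each constant names one of the NameNode's on-disk metadata files (for example fsimage, edits, fstime) kept under a storage directory's current/ subdirectory, and getName() returns that file name. As a quick orientation, here is a minimal sketch, assuming the 0.20-era API used by the examples below, that prints each constant and its file name:

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class

// Minimal sketch: list each NameNodeFile constant and its on-disk file name.
// Note: in some Hadoop versions NameNodeFile and getName() are package-private,
// so this code must live in the org.apache.hadoop.hdfs.server.namenode package.
public static void listNameNodeFiles() {
  for (NameNodeFile nnf : NameNodeFile.values()) {
    System.out.println(nnf + " -> " + nnf.getName());
  }
}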

Example 1: printStorages

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
/**
 * Print each storage directory's image/edits file and its length.
 */
public void printStorages(FSImage fs) {
  LOG.info("current storages and corresponding sizes:");
  for (Iterator<StorageDirectory> it = fs.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();

    if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
      LOG.info("  image file " + imf.getAbsolutePath() + "; len = "
          + imf.length());
    }
    if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
      LOG.info("  edits file " + edf.getAbsolutePath() + "; len = "
          + edf.length());
    }
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines of code: 21, Source: TestStorageRestore.java

Example 2: verifyDifferentDirs

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
/**
 * Verify that the edits log and fsimage are in different directories and have the expected sizes.
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.dirIterator(); it.hasNext();) {
    sd = it.next();

    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());
    } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());
    } else {
      fail("Image/Edits directories are not different");
    }
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines of code: 23, Source: TestStartup.java

Example 3: doMerge

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
/**
 * Merge image and edits, and verify consistency with the signature.
 */
private void doMerge(CheckpointSignature sig) throws IOException {
  getEditLog().open();
  StorageDirectory sdName = null;
  StorageDirectory sdEdits = null;
  Iterator<StorageDirectory> it = null;
  it = dirIterator(NameNodeDirType.IMAGE);
  if (it.hasNext())
    sdName = it.next();
  it = dirIterator(NameNodeDirType.EDITS);
  if (it.hasNext())
    sdEdits = it.next();
  if ((sdName == null) || (sdEdits == null))
    throw new IOException("Could not locate checkpoint directories");
  this.layoutVersion = -1; // to avoid assert in loadFSImage()
  loadFSImage(FSImage.getImageFile(sdName, NameNodeFile.IMAGE));
  loadFSEdits(sdEdits);
  sig.validateStorageInfo(this);
  saveNamespace(false);
}
 
Developer: cumulusyebl, Project: cumulus, Lines of code: 23, Source: SecondaryNameNode.java

Example 4: rollEditLog

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
/**
 * Closes the current edit log and opens edits.new. 
 */
synchronized void rollEditLog() throws IOException {
  waitForSyncToFinish();
  Iterator<StorageDirectory> it = fsimage.dirIterator(NameNodeDirType.EDITS);
  if(!it.hasNext()) 
    return;
  //
  // If edits.new already exists in some directory, verify it
  // exists in all directories.
  //
  boolean alreadyExists = existsNew(it.next());
  while(it.hasNext()) {
    StorageDirectory sd = it.next();
    if(alreadyExists != existsNew(sd))
      throw new IOException(getEditNewFile(sd)
            + " should " + (alreadyExists ? "" : "not ") + "exist.");
  }
  if(alreadyExists)
    return; // nothing to do, edits.new exists!

  // check if any of failed storage is now available and put it back
  fsimage.attemptRestoreRemovedStorage();

  divertFileStreams(
      Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS_NEW.getName());
}
 
Developer: cumulusyebl, Project: cumulus, Lines of code: 29, Source: FSEditLog.java

Example 5: printStorages

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
/**
 * Print each storage directory's image/edits file and its length.
 */
public void printStorages(FSImage fs) {
  LOG.info("current storages and corresponding sizes:");
  for(Iterator<StorageDirectory> it = fs.dirIterator(); it.hasNext(); ) {
    StorageDirectory sd = it.next();
    
    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
      LOG.info("  image file " + imf.getAbsolutePath() + "; len = " + imf.length());  
    }
    if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
      LOG.info("  edits file " + edf.getAbsolutePath() + "; len = " + edf.length()); 
    }
  }
}
 
Developer: cumulusyebl, Project: cumulus, Lines of code: 19, Source: TestStorageRestore.java

Example 6: testFsTimeFileCorrupt

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
/**
 * Test that a corrupted fstime file in a single storage directory does not
 * prevent the NN from starting up.
 */
@Test
public void testFsTimeFileCorrupt() throws IOException, InterruptedException {
  assertEquals(2, cluster.getNameDirs().size());
  // Get the first fstime file and truncate it.
  truncateStorageDirFile(cluster, NameNodeFile.TIME, 0);
  // Make sure we can start up despite the fact the fstime file is corrupted.
  cluster.restartNameNode();
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines of code: 13, Source: TestNameNodeCorruptionRecovery.java

Example 7: truncateStorageDirFile

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
private static void truncateStorageDirFile(MiniDFSCluster cluster,
    NameNodeFile f, int storageDirIndex) throws IOException {
  File currentDir = cluster.getNameNode().getFSImage()
      .getStorageDir(storageDirIndex).getCurrentDir();
  File nameNodeFile = new File(currentDir, f.getName());
  assertTrue(nameNodeFile.isFile());
  assertTrue(nameNodeFile.delete());
  assertTrue(nameNodeFile.createNewFile());
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines of code: 10, Source: TestNameNodeCorruptionRecovery.java

Example 8: runTest

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
void runTest(EditFileModifier modifier) throws IOException {
  //set toleration length
  final Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_TOLERATION_LENGTH_KEY, TOLERATION_LENGTH);

  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, true, null);
  try {
    cluster.waitActive();

    //add a few transactions and then shutdown namenode.
    final FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/user/foo"));
    fs.mkdirs(new Path("/user/bar"));
    cluster.shutdownNameNode();

    //modify edit files
    for(File dir : FSNamesystem.getNamespaceEditsDirs(conf)) {
      final File editFile  = new File(new File(dir, "current"),
          NameNodeFile.EDITS.getName());
      assertTrue("Should exist: " + editFile, editFile.exists());

      modifier.modify(editFile);
    }

    try {
      //restart namenode.
      cluster.restartNameNode();
      
      //No exception: the modification must be tolerable.
      Assert.assertTrue(modifier.isTolerable());
    } catch (IOException e) {
      //Got an exception: the modification must be intolerable.
      LOG.info("Got an exception", e);
      Assert.assertFalse(modifier.isTolerable());
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines of code: 40, Source: TestEditLogToleration.java

Example 9: corruptNameNodeFiles

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
private void corruptNameNodeFiles() throws IOException {
  // now corrupt/delete the directory
  List<File> nameDirs = (List<File>)FSNamesystem.getNamespaceDirs(config);
  List<File> nameEditsDirs = (List<File>)FSNamesystem.getNamespaceEditsDirs(config);

  // get name dir and its length, then delete and recreate the directory
  File dir = nameDirs.get(0); // has only one
  this.fsimageLength = new File(new File(dir, "current"), 
      NameNodeFile.IMAGE.getName()).length();

  if(dir.exists() && !(FileUtil.fullyDelete(dir)))
    throw new IOException("Cannot remove directory: " + dir);

  LOG.info("--removed dir "+dir + ";len was ="+ this.fsimageLength);

  if (!dir.mkdirs())
    throw new IOException("Cannot create directory " + dir);

  dir = nameEditsDirs.get(0); //has only one

  this.editsLength = new File(new File(dir, "current"), 
      NameNodeFile.EDITS.getName()).length();

  if(dir.exists() && !(FileUtil.fullyDelete(dir)))
    throw new IOException("Cannot remove directory: " + dir);
  if (!dir.mkdirs())
    throw new IOException("Cannot create directory " + dir);

  LOG.info("--removed dir and recreated "+dir + ";len was ="+ this.editsLength);


}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines of code: 33, Source: TestStartup.java

Example 10: verifyEditLogs

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
private void verifyEditLogs(FSNamesystem namesystem, FSImage fsimage)
  throws IOException {
  // Verify that we can read in all the transactions that we have written.
  // If there were any corruptions, it is likely that the reading in
  // of these transactions will throw an exception.
  for (Iterator<StorageDirectory> it = 
         fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
    File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
    System.out.println("Verifying file: " + editFile);
    int numEdits = new FSEditLogLoader(namesystem).loadFSEdits(
      new EditLogFileInputStream(editFile));
    System.out.println("Number of edits: " + numEdits);
  }
}
 
Developer: cumulusyebl, Project: cumulus, Lines of code: 15, Source: TestEditLogRace.java

Example 11: readCheckpointTime

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
/**
 * Read the checkpoint time directly from the fstime file.
 * @param currDir the storage directory's current/ directory
 * @return the checkpoint time, or 0 if the file is missing or unreadable
 * @throws IOException
 */
long readCheckpointTime(File currDir) throws IOException {
  File timeFile = new File(currDir, NameNodeFile.TIME.getName()); 
  long timeStamp = 0L;
  if (timeFile.exists() && timeFile.canRead()) {
    DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
    try {
      timeStamp = in.readLong();
    } finally {
      in.close();
    }
  }
  return timeStamp;
}
 
Developer: cumulusyebl, Project: cumulus, Lines of code: 20, Source: TestStorageRestore.java
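
For completeness, here is a hedged sketch of the inverse operation: writing an fstime file in the same single-long binary format that readCheckpointTime above reads back. The helper name writeCheckpointTime is hypothetical and not part of the Hadoop API.

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class

// Hypothetical helper (not in Hadoop): write a checkpoint timestamp in the
// single-long format that readCheckpointTime() reads back. As above, this
// must live in the org.apache.hadoop.hdfs.server.namenode package if
// NameNodeFile is package-private in your Hadoop version.
static void writeCheckpointTime(File currDir, long timeStamp) throws IOException {
  File timeFile = new File(currDir, NameNodeFile.TIME.getName());
  DataOutputStream out = new DataOutputStream(new FileOutputStream(timeFile));
  try {
    out.writeLong(timeStamp);
  } finally {
    out.close();
  }
}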

Example 12: corruptNameNodeFiles

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
private void corruptNameNodeFiles() throws IOException {
  // now corrupt/delete the directory
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  List<URI> nameEditsDirs = (List<URI>)FSNamesystem.getNamespaceEditsDirs(config);

  // get name dir and its length, then delete and recreate the directory
  File dir = new File(nameDirs.get(0).getPath()); // has only one
  this.fsimageLength = new File(new File(dir, "current"), 
      NameNodeFile.IMAGE.getName()).length();

  if(dir.exists() && !(FileUtil.fullyDelete(dir)))
    throw new IOException("Cannot remove directory: " + dir);

  LOG.info("--removed dir "+dir + ";len was ="+ this.fsimageLength);

  if (!dir.mkdirs())
    throw new IOException("Cannot create directory " + dir);

  dir = new File(nameEditsDirs.get(0).getPath()); // has only one

  this.editsLength = new File(new File(dir, "current"), 
      NameNodeFile.EDITS.getName()).length();

  if(dir.exists() && !(FileUtil.fullyDelete(dir)))
    throw new IOException("Cannot remove directory: " + dir);
  if (!dir.mkdirs())
    throw new IOException("Cannot create directory " + dir);

  LOG.info("--removed dir and recreated "+dir + ";len was ="+ this.editsLength);


}
 
Developer: cumulusyebl, Project: cumulus, Lines of code: 33, Source: TestStartup.java

Example 13: checkFiles

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
/**
 * Check which image/edits files exist (or do not) and compare their lengths.
 */
public void checkFiles(boolean valid) {
  //look at the valid storage
  File fsImg1 = new File(path1, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.IMAGE.getName());
  File fsImg2 = new File(path2, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.IMAGE.getName());
  File fsImg3 = new File(path3, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.IMAGE.getName());

  File fsEdits1 = new File(path1, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS.getName());
  File fsEdits2 = new File(path2, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS.getName());
  File fsEdits3 = new File(path3, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS.getName());

  this.printStorages(cluster.getNameNode().getFSImage());
  
  LOG.info("++++ image files = "+fsImg1.getAbsolutePath() + "," + fsImg2.getAbsolutePath() + ","+ fsImg3.getAbsolutePath());
  LOG.info("++++ edits files = "+fsEdits1.getAbsolutePath() + "," + fsEdits2.getAbsolutePath() + ","+ fsEdits3.getAbsolutePath());
  LOG.info("checkFiles compares lengths: img1=" + fsImg1.length()  + ",img2=" + fsImg2.length()  + ",img3=" + fsImg3.length());
  LOG.info("checkFiles compares lengths: edits1=" + fsEdits1.length()  + ",edits2=" + fsEdits2.length()  + ",edits3=" + fsEdits3.length());
  
  if(valid) {
    assertTrue(fsImg1.exists());
    assertTrue(fsImg2.exists());
    assertFalse(fsImg3.exists());
    assertTrue(fsEdits1.exists());
    assertTrue(fsEdits2.exists());
    assertTrue(fsEdits3.exists());
    
    // should be the same
    assertTrue(fsImg1.length() == fsImg2.length());
    assertTrue(fsEdits1.length() == fsEdits2.length());
    assertTrue(fsEdits1.length() == fsEdits3.length());
  } else {
    // should be different
    assertTrue(fsEdits2.length() != fsEdits1.length());
    assertTrue(fsEdits2.length() != fsEdits3.length());
  }
}
 
Developer: iVCE, Project: RDFS, Lines of code: 39, Source: TestStorageRestore.java

Example 14: verifyEditLogs

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
private void verifyEditLogs(FSImage fsimage)
  throws IOException {
  // Verify that we can read in all the transactions that we have written.
  // If there were any corruptions, it is likely that the reading in
  // of these transactions will throw an exception.
  for (Iterator<StorageDirectory> it = 
         fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
    File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
    System.out.println("Verifying file: " + editFile);
    int numEdits = FSEditLog.loadFSEdits(
      new FSEditLog.EditLogFileInputStream(editFile));
    System.out.println("Number of edits: " + numEdits);
  }
}
 
Developer: thisisvoa, Project: hadoop-0.20, Lines of code: 15, Source: TestEditLogRace.java

Example 15: testEditLog

import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile; // import the required package/class
/**
 * Tests transaction logging in dfs.
 */
public void testEditLog() throws IOException {

  // start a cluster 
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;

  try {
    cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();

    for (Iterator<File> it = cluster.getNameDirs().iterator(); it.hasNext(); ) {
      File dir = new File(it.next().getPath());
      System.out.println(dir);
    }
    
    FSImage fsimage = namesystem.getFSImage();
    FSEditLog editLog = fsimage.getEditLog();

    // set small size of flush buffer
    editLog.setBufferCapacity(2048);
    editLog.close();
    editLog.open();
    namesystem.getDelegationTokenSecretManager().startThreads();
  
    // Create threads and make them run transactions concurrently.
    Thread threadId[] = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
      Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
      threadId[i] = new Thread(trans, "TransactionThread-" + i);
      threadId[i].start();
    }

    // wait for all transactions to get over
    for (int i = 0; i < NUM_THREADS; i++) {
      try {
        threadId[i].join();
      } catch (InterruptedException e) {
        i--;      // retry 
      }
    } 
    
    editLog.close();

    // Verify that we can read in all the transactions that we have written.
    // If there were any corruptions, it is likely that the reading in
    // of these transactions will throw an exception.
    //
    namesystem.getDelegationTokenSecretManager().stopThreads();
    int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
    for (Iterator<StorageDirectory> it = 
            fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
      File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
      System.out.println("Verifying file: " + editFile);
      int numEdits = FSEditLog.loadFSEdits(
          new EditLogFileInputStream(editFile), -1);
      assertTrue("Verification for " + editFile + " failed. " +
                 "Expected " + (NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys) + " transactions. "+
                 "Found " + numEdits + " transactions.",
                 numEdits == NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS +numKeys);

    }
  } finally {
    if(fileSys != null) fileSys.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines of code: 73, Source: TestSecurityTokenEditLog.java


Note: The org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.