

Java NodeType Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType. If you are unsure what the NodeType class is for, how to use it, or what real-world usage looks like, the curated examples below should help.


The NodeType class belongs to the org.apache.hadoop.hdfs.server.common.HdfsConstants package. The sections below present 15 code examples of NodeType, sorted by popularity by default.
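
As the examples show, NodeType is a small enum that tells HDFS storage code whether it is operating on NameNode or DataNode storage (the hadoop-EAR fork in Example 11 also uses a JOURNAL_NODE value). The sketch below only illustrates how the constants are typically branched on; the helper method and its return values are made up for this page:

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;

public class NodeTypeSketch {
  // Illustrative helper: choose a label for a storage directory by node type.
  static String storageLabel(NodeType nodeType) {
    switch (nodeType) {
    case NAME_NODE:
      return "namenode-storage";   // image/edits directories
    case DATA_NODE:
      return "datanode-storage";   // block/finalized directories
    default:
      throw new IllegalArgumentException("Unexpected node type: " + nodeType);
    }
  }

  public static void main(String[] args) {
    System.out.println(storageLabel(NodeType.NAME_NODE));
  }
}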

Example 1: createStorageState

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Sets up the storage directories for the given node type, either
 * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
 * dfs.data.dir, the subdirectories represented by the first five elements 
 * of the <code>state</code> array will be created and populated.
 * See UpgradeUtilities.createStorageDirs().
 * 
 * @param nodeType
 *   the type of node that storage should be created for. Based on this
 *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
 * @param state
 *   a row from the testCases table which indicates which directories
 *   to setup for the node
 * @return file paths representing either dfs.name.dir or dfs.data.dir
 *   directories
 */
String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
  String[] baseDirs = (nodeType == NAME_NODE ?
                       conf.getStrings("dfs.name.dir") :
                       conf.getStrings("dfs.data.dir"));
  UpgradeUtilities.createEmptyDirs(baseDirs);
  if (state[0])  // current
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
  if (state[1])  // previous
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
  if (state[2])  // previous.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
  if (state[3])  // removed.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
  if (state[4])  // lastcheckpoint.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "lastcheckpoint.tmp");
  return baseDirs;
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 34, Source: TestDFSStorageStateRecovery.java
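
A hedged usage sketch for Example 1: a storage-state recovery test typically builds one row of the boolean table, starts the node so it can recover, and then verifies the result with a checkResult helper such as the one in Example 3. The node-start step and the exact table row below are assumptions made for illustration:

// Hypothetical test fragment, not taken from TestDFSStorageStateRecovery itself.
boolean[] state = {true, true, false, false, false}; // current, previous, previous.tmp, removed.tmp, lastcheckpoint.tmp
String[] baseDirs = createStorageState(NodeType.NAME_NODE, state);
// ... start the NameNode here and let it run its recovery ...
checkResult(NodeType.NAME_NODE, baseDirs); // see Example 3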

Example 2: format

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Format a namespace slice storage. 
 * @param nsSdir the namespace slice storage directory
 * @param nsInfo the namespace info
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void format(StorageDirectory nsSdir, NamespaceInfo nsInfo) throws IOException {
  LOG.info("Formatting namespace " + namespaceID + " directory "
      + nsSdir.getCurrentDir());
  nsSdir.clearDirectory(); // create directory
  File rbwDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_RBW);
  File finalizedDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_FINALIZED);
  LOG.info("Creating Directories : " + rbwDir + ", " + finalizedDir);
  if (!rbwDir.mkdirs() || !finalizedDir.mkdirs()) {
    throw new IOException("Cannot create directories : " + rbwDir + ", "
        + finalizedDir);
  }
  this.layoutVersion = FSConstants.LAYOUT_VERSION;
  this.cTime = nsInfo.getCTime();
  this.namespaceID = nsInfo.getNamespaceID();
  this.storageType = NodeType.DATA_NODE;
  nsSdir.write();
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 24, Source: NameSpaceSliceStorage.java

Example 3: checkResult

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Verify that the new current directory is the old previous.  
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
  switch (nodeType) {
  case NAME_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"current").isDirectory());
      assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
      assertTrue(new File(baseDirs[i],"current/edits").isFile());
      assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
      assertTrue(new File(baseDirs[i],"current/fstime").isFile());
    }
    break;
  case DATA_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
    break;
  }
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i],"previous").isDirectory());
  }
}
 
Contributor: Seagate, Project: hadoop-on-lustre, Lines: 29, Source: TestDFSRollback.java

Example 4: createStorageState

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Sets up the storage directories for the given node type, either
 * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
 * dfs.data.dir, the subdirectories represented by the first five elements 
 * of the <code>state</code> array will be created and populated.
 * See UpgradeUtilities.createStorageDirs().
 * 
 * @param nodeType
 *   the type of node that storage should be created for. Based on this
 *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
 * @param state
 *   a row from the testCases table which indicates which directories
 *   to setup for the node
 * @return file paths representing either dfs.name.dir or dfs.data.dir
 *   directories
 */
String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
  String[] baseDirs = (nodeType == NAME_NODE ?
                       conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY) :
                       conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
  UpgradeUtilities.createEmptyDirs(baseDirs);
  if (state[0])  // current
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
  if (state[1])  // previous
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
  if (state[2])  // previous.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
  if (state[3])  // removed.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
  if (state[4])  // lastcheckpoint.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "lastcheckpoint.tmp");
  return baseDirs;
}
 
Contributor: cumulusyebl, Project: cumulus, Lines: 34, Source: TestDFSStorageStateRecovery.java

Example 5: createStorageDirs

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Simulate the <code>dfs.name.dir</code> or <code>dfs.data.dir</code>
 * of a populated DFS filesystem.
 *
 * This method creates and populates the directory specified by
 *  <code>parent/dirName</code>, for each parent directory.
 * The contents of the new directories will be
 * appropriate for the given node type.  If the directory does not
 * exist, it will be created.  If the directory already exists, it
 * will first be deleted.
 *
 * By default, a singleton master populated storage
 * directory is created for a Namenode (contains edits, fsimage,
 * version, and time files) and a Datanode (contains version and
 * block files).  These directories are then
 * copied by this method to create new storage
 * directories of the appropriate type (Namenode or Datanode).
 *
 * @return the array of created directories
 */
public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
    switch (nodeType) {
    case NAME_NODE:
      localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      break;
    case DATA_NODE:
      localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      break;
    }
    retVal[i] = newDir;
  }
  return retVal;
}
 
Contributor: cumulusyebl, Project: cumulus, Lines: 43, Source: UpgradeUtilities.java
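
A possible way to call Example 5 from a test, with made-up parent paths; the method copies the pre-populated master storage into a "current" subdirectory under each parent:

// Illustrative only: populate fresh "current" directories for a DataNode test.
String[] parents = { "/tmp/dfs/data1", "/tmp/dfs/data2" };  // assumed test paths
File[] currentDirs =
    UpgradeUtilities.createStorageDirs(NodeType.DATA_NODE, parents, "current");
for (File dir : currentDirs) {
  System.out.println("Populated " + dir.getAbsolutePath());
}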

Example 6: log

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase, StorageInfo version) {
  String testCaseLine = "";
  if (testCase != null) {
    testCaseLine = " testCase="+testCase;
  }
  LOG.info("============================================================");
  LOG.info("***TEST*** " + label + ":"
           + testCaseLine
           + " nodeType="+nodeType
           + " layoutVersion="+version.getLayoutVersion()
           + " namespaceID="+version.getNamespaceID()
           + " fsscTime="+version.getCTime());
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 17, Source: TestDFSStartupVersions.java

Example 7: checksumMasterContents

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
public static long checksumMasterContents(NodeType nodeType, int nnIndex) throws IOException {
  if (nodeType == NAME_NODE) {
    return namenodeStorageChecksums[nnIndex];
  } else {
    return datanodeStorageChecksum;
  }
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 8, Source: UpgradeUtilities.java

Example 8: checksumContents

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Compute the checksum of all the files in the specified directory. The
 * contents of subdirectories are not included. This method provides an easy
 * way to ensure equality between the contents of two directories.
 *
 * @param nodeType
 *          if DATA_NODE then any file named "VERSION" is ignored. This is
 *          because this file is changed every time the Datanode is
 *          started.
 * @param dir
 *          must be a directory. Subdirectories are ignored.
 * @param skipFiles
 *          files to be skipped
 *
 * @throws IllegalArgumentException
 *           if the specified path is not a directory
 * @throws IOException
 *           if an IOException occurs while reading the files
 * @return the computed checksum value
 */
public static long checksumContents(NodeType nodeType, File dir,
    String[] skipFiles) throws IOException {
  if (!dir.isDirectory()) {
    throw new IllegalArgumentException(
        "Given argument is not a directory:" + dir);
  }
  File[] list = dir.listFiles();
  Arrays.sort(list);
  CRC32 checksum = new CRC32();
  for (int i = 0; i < list.length; i++) {
    if (!list[i].isFile() || inList(skipFiles, list[i].getName())) {
      continue;
    }

    // skip VERSION file for DataNodes
    if (nodeType == DATA_NODE && list[i].getName().equals("VERSION")) {
      continue; 
    }
    FileInputStream fis = null;
    try {
      fis = new FileInputStream(list[i]);
      byte[] buffer = new byte[1024];
      int bytesRead;
      while ((bytesRead = fis.read(buffer)) != -1) {
        checksum.update(buffer, 0, bytesRead);
      }
    } finally {
      if(fis != null) {
        fis.close();
      }
    }
  }
  return checksum.getValue();
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 55, Source: UpgradeUtilities.java
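
Because Example 8 checksums every regular file in a directory (skipping VERSION for DataNodes), two directories with equal checksums can be treated as holding identical contents. A minimal sketch, assuming the directories already exist and passing an empty skip list; the paths are illustrative:

// Illustrative only: compare two DataNode storage directories.
long a = UpgradeUtilities.checksumContents(
    NodeType.DATA_NODE, new File("/tmp/dfs/data1/current"), new String[0]);
long b = UpgradeUtilities.checksumContents(
    NodeType.DATA_NODE, new File("/tmp/dfs/data2/current"), new String[0]);
assertEquals(a, b);  // identical file contents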

Example 9: createStorageDirs

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName,
    File srcFile) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    switch (nodeType) {
    case NAME_NODE:
      localFS.copyToLocalFile(new Path(srcFile.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newImgDir = new Path(newDir.getParent(), "image");
      if (!localFS.exists(newImgDir))
        localFS.copyToLocalFile(
            new Path(srcFile.toString(), "image"),
            newImgDir,
            false);
      break;
    case DATA_NODE:
      localFS.copyToLocalFile(new Path(srcFile.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newStorageFile = new Path(newDir.getParent(), "storage");
      if (!localFS.exists(newStorageFile))
        localFS.copyToLocalFile(
            new Path(srcFile.toString(), "storage"),
            newStorageFile,
            false);
      break;
    }
    retVal[i] = newDir;
  }
  return retVal;
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 36, Source: UpgradeUtilities.java

Example 10: checkResult

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Verify that the new current directory is the old previous.  
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
      break;
    case DATA_NODE:
      for (int i = 0; i < baseDirs.length; i++) {
        assertEquals(
            UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
            UpgradeUtilities.checksumMasterContents(nodeType));
        File nsBaseDir = NameSpaceSliceStorage.getNsRoot(
            UpgradeUtilities.getCurrentNamespaceID(cluster), new File(baseDirs[i], "current"));
        assertEquals(
            UpgradeUtilities.checksumContents(nodeType,
                new File(nsBaseDir, MiniDFSCluster.FINALIZED_DIR_NAME)),
            UpgradeUtilities.checksumDatanodeNSStorageContents());
      }
      break;
    }
  }
  
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i],"previous").isDirectory());
  }
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 37, Source: TestDFSRollback.java

Example 11: checksumCurrent

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
private long[] checksumCurrent() throws IOException {
  int i = 0;
  long[] checksums = new long[storageSet.size()];
  for (StorageDirectory dir : storageSet) {
    assertTrue(dir.getCurrentDir().exists());
    checksums[i++] = UpgradeUtilities.checksumContents(
        NodeType.JOURNAL_NODE, dir.getCurrentDir(),
        new String[] { "last-promised-epoch" });
  }
  return checksums;
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 12, Source: TestAvatarQJMUpgrade.java

Example 12: checkState

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
private void checkState(long[] checksums, boolean failover, boolean previous,
    long[] expectedChecksums)
    throws Exception {
  // Verify checksums for previous directory match with the older current
  // directory.

  int i = 0;
  for (StorageDirectory dir : h.storageSet) {
    System.out.println("Directory : " + dir);
    File dirToProcess = (previous) ? dir.getPreviousDir() : dir.getCurrentDir();
    assertTrue(dirToProcess.exists());
    // We skip last-promised-epoch since it is bumped up just before the
    // upgrade is done when we recover unfinalized segments.
    if (expectedChecksums == null) {
      assertEquals(checksums[i++], UpgradeUtilities.checksumContents(
          NodeType.JOURNAL_NODE, dirToProcess,
          new String[] { "last-promised-epoch" }));
    } else {
      assertEquals(checksums[i], expectedChecksums[i]);
      i++;
    }
  }

  // Ensure we can still write.
  DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/test1"), 1024,
      (short) 1, 0);

  // Ensure we can still do checkpoint.
  h.doCheckpoint();

  // Ensure we can still failover.
  if (failover) {
    cluster.failOver();
  }
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 36, Source: TestAvatarQJMUpgrade.java

Example 13: NNStorage

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Construct the NNStorage.
 * @param conf Namenode configuration.
 * @param imageDirs Directories the image can be stored in.
 * @param editsDirs Directories the editlog can be stored in.
 * @throws IOException if any directories are inaccessible.
 */
public NNStorage(Configuration conf, Collection<URI> imageDirs, 
    Collection<URI> editsDirs, Map<URI, NNStorageLocation> locationMap) 
    throws IOException {
  super(NodeType.NAME_NODE);

  storageDirs = Collections.synchronizedList(new ArrayList<StorageDirectory>());
  
  // this may modify the editsDirs, so copy before passing in
  setStorageDirectories(imageDirs, 
                        new ArrayList<URI>(editsDirs), locationMap);
  this.conf = conf;
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 20, Source: NNStorage.java

Example 14: getFields

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/**
 * Get common storage fields.
 * Should be overridden if additional fields need to be read.
 * 
 * @param props the properties read from the storage directory's VERSION file
 * @param sd the storage directory being validated
 * @throws IOException if a required field is missing or inconsistent
 */
protected void getFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  String sv, st, sid, sct;
  sv = props.getProperty(LAYOUT_VERSION);
  st = props.getProperty(STORAGE_TYPE);
  sid = props.getProperty(NAMESPACE_ID);
  sct = props.getProperty(CHECK_TIME);
  if (sv == null || st == null || sid == null || sct == null)
    throw new InconsistentFSStateException(sd.root,
        "file " + STORAGE_FILE_VERSION + " is invalid.");
  int rv = Integer.parseInt(sv);
  NodeType rt = NodeType.valueOf(st);
  int rid = Integer.parseInt(sid);
  long rct = Long.parseLong(sct);
  if (!storageType.equals(rt) ||
      !((namespaceID == 0) || (rid == 0) || namespaceID == rid))
    throw new InconsistentFSStateException(sd.root,
        "is incompatible with others. " +
        " namespaceID is " + namespaceID +
        " and rid is " + rid + "," +
        " storage type is " + storageType +
        " but rt is " + rt);
  if (rv < FSConstants.LAYOUT_VERSION) // future version
    throw new IncorrectVersionException(rv, "storage directory "
        + sd.root.getCanonicalPath());
  layoutVersion = rv;
  storageType = rt;
  namespaceID = rid;
  cTime = rct;
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 39, Source: Storage.java
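
For context on Example 14: getFields reads the per-directory VERSION file, a Java properties file whose keys correspond to the LAYOUT_VERSION, STORAGE_TYPE, NAMESPACE_ID and CHECK_TIME constants. The literal key names and sample values below are typical of stock HDFS and are assumptions, not taken from this fork:

// Illustrative only: the kind of key/value pairs getFields() expects to parse.
Properties props = new Properties();
props.setProperty("layoutVersion", "-41");       // LAYOUT_VERSION (assumed key name)
props.setProperty("storageType", "NAME_NODE");   // STORAGE_TYPE, parsed via NodeType.valueOf(...)
props.setProperty("namespaceID", "397892561");   // NAMESPACE_ID (sample value)
props.setProperty("cTime", "0");                 // CHECK_TIME
NodeType parsed = NodeType.valueOf(props.getProperty("storageType"));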

Example 15: setStorageType

import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; // import the required package/class
/** Validate and set storage type from {@link Properties}*/
protected void setStorageType(Properties props, StorageDirectory sd)
    throws InconsistentFSStateException {
  NodeType type = NodeType.valueOf(getProperty(props, sd, STORAGE_TYPE));
  if (!storageType.equals(type)) {
    throw new InconsistentFSStateException(sd.root,
        "node type is incompatible with others.");
  }
  storageType = type;
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 11, Source: Storage.java


Note: The org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects contributed by various developers; copyright remains with the original authors. Follow each project's license when using or redistributing the code, and do not republish without permission.