

Java HdfsServerConstants Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.common.HdfsServerConstants. If you are wondering what exactly HdfsServerConstants is for, how it is used, or where to find usage examples, the curated class code examples below may help.


The HdfsServerConstants class belongs to the org.apache.hadoop.hdfs.server.common package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
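
Before diving in, here is a minimal, self-contained sketch that simply prints a few of the constants the examples below rely on. It is illustrative only: which members exist, and whether a given constant lives in HdfsServerConstants or in HdfsConstants, shifts between Hadoop versions, so the member references here are assumptions taken from the examples on this page rather than a stable API.

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

public class HdfsServerConstantsDemo {
  public static void main(String[] args) {
    // Sentinel transaction ID used by the edit-log examples (3, 8, 9, 11).
    System.out.println("INVALID_TXID = " + HdfsServerConstants.INVALID_TXID);
    // Minimum number of blocks' worth of free space a storage must offer
    // before it is considered for a new write (Examples 6 and 12-15).
    System.out.println("MIN_BLOCKS_FOR_WRITE = "
        + HdfsServerConstants.MIN_BLOCKS_FOR_WRITE);
    // Nested enum consulted during rolling-upgrade rollback (Examples 1 and 2).
    System.out.println("ROLLBACK = "
        + HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK);
  }
}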

Example 1: readProperties

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
void readProperties(StorageDirectory sd, StartupOption startupOption)
    throws IOException {
  Properties props = readPropertiesFile(sd.getVersionFile());
  if (HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK.matches
      (startupOption)) {
    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
    if (lv > getServiceLayoutVersion()) {
      // we should not use a newer version for rollingUpgrade rollback
      throw new IncorrectVersionException(getServiceLayoutVersion(), lv,
          "storage directory " + sd.getRoot().getAbsolutePath());
    }
    props.setProperty("layoutVersion",
        Integer.toString(HdfsConstants.NAMENODE_LAYOUT_VERSION));
  }
  setFieldsFromProperties(props, sd);
}
 
Developer: naver, Project: hadoop, Lines of code: 17, Source: NNStorage.java
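
In other words: when the NameNode starts with the rolling-upgrade ROLLBACK option, a stored layoutVersion that fails the check is rejected outright with IncorrectVersionException, while one that passes is rewritten in the in-memory Properties to the software's current NAMENODE_LAYOUT_VERSION before setFieldsFromProperties loads it, so the directory is then read as if it matched the running software. (That reading is inferred from the snippet itself rather than from Hadoop's documentation.)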

Example 2: readProperties

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
void readProperties(StorageDirectory sd, StartupOption startupOption)
    throws IOException {
  Properties props = readPropertiesFile(sd.getVersionFile());
  if (HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK.matches
      (startupOption)) {
    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
    if (lv > getServiceLayoutVersion()) {
      // we should not use a newer version for rollingUpgrade rollback
      throw new IncorrectVersionException(getServiceLayoutVersion(), lv,
          "storage directory " + sd.getRoot().getAbsolutePath());
    }
    props.setProperty("layoutVersion",
        Integer.toString(HdfsServerConstants.NAMENODE_LAYOUT_VERSION));
  }
  setFieldsFromProperties(props, sd);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 17, Source: NNStorage.java

Example 3: scanEditLog

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * @param file          File being scanned and validated.
 * @param maxTxIdToScan Maximum Tx ID to try to scan.
 *                      The scan returns after reading this or a higher
 *                      ID. The file portion beyond this ID is
 *                      potentially being updated.
 * @param verifyVersion Whether to verify the layout version in the
 *                      log header.
 * @return Result of the validation
 * @throws IOException
 */
static FSEditLogLoader.EditLogValidation scanEditLog(File file,
    long maxTxIdToScan, boolean verifyVersion)
    throws IOException {
  EditLogFileInputStream in;
  try {
    in = new EditLogFileInputStream(file);
    // Read the header and initialize the input stream, but do not check
    // the layout version.
    in.getVersion(verifyVersion);
  } catch (LogHeaderCorruptException e) {
    LOG.warn("Log file " + file + " has no valid header", e);
    return new FSEditLogLoader.EditLogValidation(0,
        HdfsServerConstants.INVALID_TXID, true);
  }

  try {
    return FSEditLogLoader.scanEditLog(in, maxTxIdToScan);
  } finally {
    IOUtils.closeStream(in);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 31, Source: EditLogFileInputStream.java
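
Two details are worth calling out. A corrupt header is not fatal to the caller: it is logged and converted into an EditLogValidation whose end transaction ID is the HdfsServerConstants.INVALID_TXID sentinel. And the stream is closed in a finally block via IOUtils.closeStream, so the file handle is released even when the scan itself throws.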

Example 4: readLogVersion

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * Read the header of an fsedit log.
 * @param in fsedit stream
 * @param verifyLayoutVersion whether to verify the layout version
 * @return the edit log version number
 * @throws IOException if an error occurs
 */
@VisibleForTesting
static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
    throws IOException, LogHeaderCorruptException {
  int logVersion;
  try {
    logVersion = in.readInt();
  } catch (EOFException eofe) {
    throw new LogHeaderCorruptException(
        "Reached EOF when reading log header");
  }
  if (verifyLayoutVersion &&
      (logVersion < HdfsServerConstants.NAMENODE_LAYOUT_VERSION || // future version
       logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported
    throw new LogHeaderCorruptException(
        "Unexpected version of the file system log file: "
        + logVersion + ". Current version = "
        + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ".");
  }
  return logVersion;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 27, Source: EditLogFileInputStream.java
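
The version comparison reads backwards unless you know that HDFS layout versions are negative integers that decrease as features are added, so a smaller (more negative) number means a newer layout. A tiny standalone sketch, with hypothetical values standing in for both constants:

public class LayoutVersionDemo {
  public static void main(String[] args) {
    int current = -63;        // stand-in for HdfsServerConstants.NAMENODE_LAYOUT_VERSION
    int lastUpgradable = -16; // stand-in for Storage.LAST_UPGRADABLE_LAYOUT_VERSION
    int fromNewerNN = -64;    // a log written by newer software
    int ancientLog = -10;     // a log too old to upgrade from
    System.out.println(fromNewerNN < current);       // true -> "future version" branch
    System.out.println(ancientLog > lastUpgradable); // true -> "unsupported" branch
  }
}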

Example 5: renameReservedComponentOnUpgrade

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * Same as {@link #renameReservedPathsOnUpgrade}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support snapshots, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    if (Arrays.equals(component, HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
          RESERVED_ERROR_MSG);
      component =
          DFSUtil.string2Bytes(renameReservedMap
              .get(HdfsConstants.DOT_SNAPSHOT_DIR));
    }
  }
  return component;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 20, Source: FSImageFormat.java
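
The background, inferred from the snippet: before the SNAPSHOT layout feature existed, an ordinary user directory could legally be named ".snapshot", a name that later became reserved. On upgrade from such a layout, the component is swapped for the user-supplied replacement recorded in renameReservedMap, and the Preconditions check aborts with RESERVED_ERROR_MSG if no replacement was configured.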

Example 6: chooseStorage4Block

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * Find whether the datanode contains a good storage of the given type to
 * place a block of size <code>blockSize</code>.
 *
 * <p>Currently the datanode only cares about the storage type; the first
 * storage of the given type that we see is returned.
 *
 * @param t requested storage type
 * @param blockSize requested block size
 * @return the first good storage of the given type, or null if the node
 *         has too little space left for the block
 */
public DatanodeStorageInfo chooseStorage4Block(StorageType t,
    long blockSize) {
  final long requiredSize =
      blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
  final long scheduledSize = blockSize * getBlocksScheduled(t);
  long remaining = 0;
  DatanodeStorageInfo storage = null;
  for (DatanodeStorageInfo s : getStorageInfos()) {
    if (s.getState() == State.NORMAL && s.getStorageType() == t) {
      if (storage == null) {
        storage = s;
      }
      long r = s.getRemaining();
      if (r >= requiredSize) {
        remaining += r;
      }
    }
  }
  if (requiredSize > remaining - scheduledSize) {
    return null;
  }
  return storage;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 34, Source: DatanodeDescriptor.java
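
The acceptance test at the end is easy to misread, so here is a standalone sketch of the same arithmetic with invented numbers (the block size, scheduled count, and per-storage remaining values are all hypothetical). Note the asymmetry: only storages whose own remaining space covers requiredSize contribute to the sum, while the storage actually returned is simply the first NORMAL storage of the requested type, whatever its own free space.

public class SpaceCheckDemo {
  public static void main(String[] args) {
    long blockSize = 128L << 20;        // 128 MiB, hypothetical
    long minBlocksForWrite = 1;         // stand-in for HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
    long requiredSize = blockSize * minBlocksForWrite;
    long scheduledSize = 2 * blockSize; // pretend 2 blocks are already scheduled here
    long[] remainingPerStorage = {200L << 20, 50L << 20}; // 200 MiB counts, 50 MiB is skipped
    long remaining = 0;
    for (long r : remainingPerStorage) {
      if (r >= requiredSize) {
        remaining += r; // only storages that individually fit the block are summed
      }
    }
    // 128 MiB > 200 MiB - 256 MiB, so this node would be rejected.
    System.out.println("accepted = " + !(requiredSize > remaining - scheduledSize));
  }
}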

Example 7: format

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
void format(StorageDirectory sd, NamespaceInfo nsInfo,
            String datanodeUuid) throws IOException {
  sd.clearDirectory(); // create directory
  this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
  this.clusterID = nsInfo.getClusterID();
  this.namespaceID = nsInfo.getNamespaceID();
  this.cTime = 0;
  setDatanodeUuid(datanodeUuid);

  if (sd.getStorageUuid() == null) {
    // Assign a new Storage UUID.
    sd.setStorageUuid(DatanodeStorage.generateUuid());
  }

  writeProperties(sd);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 17, Source: DataStorage.java
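
Note that the layout version written here is HdfsServerConstants.DATANODE_LAYOUT_VERSION, which is tracked separately from the NameNode's layout version, and that a fresh storage UUID is generated only when the StorageDirectory does not already carry one.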

Example 8: scanStorageForLatestEdits

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * Scan the local storage directory, and return the segment containing
 * the highest transaction.
 * @return the EditLogFile with the highest transactions, or null
 * if no files exist.
 */
private synchronized EditLogFile scanStorageForLatestEdits() throws IOException {
  if (!fjm.getStorageDirectory().getCurrentDir().exists()) {
    return null;
  }
  
  LOG.info("Scanning storage " + fjm);
  List<EditLogFile> files = fjm.getLogFiles(0);
  
  while (!files.isEmpty()) {
    EditLogFile latestLog = files.remove(files.size() - 1);
    latestLog.scanLog(Long.MAX_VALUE, false);
    LOG.info("Latest log is " + latestLog);
    if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
      // the log contains no transactions
      LOG.warn("Latest log " + latestLog + " has no transactions. " +
          "moving it aside and looking for previous log");
      latestLog.moveAsideEmptyFile();
    } else {
      return latestLog;
    }
  }
  
  LOG.info("No files in " + fjm);
  return null;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 32, Source: Journal.java
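
The scan walks the segments newest-first: files.remove(files.size() - 1) pops the highest-numbered log, and a segment whose last transaction ID is still INVALID_TXID after scanning is treated as empty, moved aside with moveAsideEmptyFile(), and skipped in favor of the previous segment. Only when every file has been exhausted does the method return null.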

Example 9: getSegmentInfo

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * @return the current state of the given segment, or null if the
 * segment does not exist.
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId)
    throws IOException {
  EditLogFile elf = fjm.getLogFile(segmentTxId);
  if (elf == null) {
    return null;
  }
  if (elf.isInProgress()) {
    elf.scanLog(Long.MAX_VALUE, false);
  }
  if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
    LOG.info("Edit log file " + elf + " appears to be empty. " +
        "Moving it aside...");
    elf.moveAsideEmptyFile();
    return null;
  }
  SegmentStateProto ret = SegmentStateProto.newBuilder()
      .setStartTxId(segmentTxId)
      .setEndTxId(elf.getLastTxId())
      .setIsInProgress(elf.isInProgress())
      .build();
  LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
      TextFormat.shortDebugString(ret));
  return ret;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 30, Source: Journal.java

Example 10: checkStripedBlockUC

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
private void checkStripedBlockUC(BlockInfoStriped block,
    boolean checkReplica) {
  assertEquals(0, block.numNodes());
  Assert.assertFalse(block.isComplete());
  Assert.assertEquals(StripedFileTestUtil.NUM_DATA_BLOCKS, block.getDataBlockNum());
  Assert.assertEquals(StripedFileTestUtil.NUM_PARITY_BLOCKS,
      block.getParityBlockNum());
  Assert.assertEquals(0,
      block.getBlockId() & HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);

  Assert.assertEquals(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
      block.getBlockUCState());
  if (checkReplica) {
    Assert.assertEquals(GROUP_SIZE,
        block.getUnderConstructionFeature().getNumExpectedLocations());
    DatanodeStorageInfo[] storages = block.getUnderConstructionFeature()
        .getExpectedStorageLocations();
    for (DataNode dn : cluster.getDataNodes()) {
      Assert.assertTrue(includeDataNode(dn.getDatanodeId(), storages));
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 23, Source: TestAddStripedBlocks.java
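
The masking assertion reflects how striped (erasure-coded) block IDs are laid out: the low bits covered by HdfsServerConstants.BLOCK_GROUP_INDEX_MASK hold a block's index within its block group, so the ID of the group itself must have those bits clear, which is exactly what (getBlockId() & BLOCK_GROUP_INDEX_MASK) == 0 checks.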

Example 11: testReadURL

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
@Test
public void testReadURL() throws Exception {
  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");

  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
      anyBoolean());

  URL url = new URL("http://localhost/fakeLog");
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
      HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID, false);
  // Read the edit log and verify that we got all of the data.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
      .countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));

  // Check that length header was picked up.
  assertEquals(FAKE_LOG_DATA.length, elis.length());
  elis.close();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 26, Source: TestEditLogFileInputStream.java
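
Passing HdfsServerConstants.INVALID_TXID for both the first and last transaction ID marks the stream's transaction range as unknown, which suits this test: all it verifies is that the HTTP plumbing (mocked with Mockito) delivers the log bytes intact and that the Content-Length header surfaces through elis.length().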

Example 12: testRereplicateOnBoundaryTopology

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * Test the re-replication policy in a boundary case.
 * Rack 2 has only one node group, and the node in that node group is chosen.
 * Rack 1 has two node groups, and one of them is chosen.
 * The replica policy should choose a node from a node group of Rack 1, but
 * not from the same node group as the already-chosen nodes.
 */
@Test
public void testRereplicateOnBoundaryTopology() throws Exception {
  for (int i = 0; i < dataNodesInBoundaryCase.length; i++) {
    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
        2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<>();
  chosenNodes.add(storagesInBoundaryCase[0]);
  chosenNodes.add(storagesInBoundaryCase[5]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(1, dataNodesInBoundaryCase[0], chosenNodes);
  assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[5], targets[0]));
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 24, Source: TestReplicationPolicyWithNodeGroup.java

Example 13: testChooseFavoredNodesNodeGroup

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * In this test case, 2 favored nodes are passed:
 * dataNodes[0] (good node) and dataNodes[3] (bad node).
 * The 1st replica should be placed on the good favored node, dataNodes[0].
 * The 2nd replica should land in the bad favored node's node group, on
 * dataNodes[4].
 * @throws Exception
 */
@Test
public void testChooseFavoredNodesNodeGroup() throws Exception {
  updateHeartbeatWithUsage(dataNodes[3],
      2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L,
      0L, 0L, 0, 0); // no space

  DatanodeStorageInfo[] targets;
  List<DatanodeDescriptor> expectedTargets =
      new ArrayList<DatanodeDescriptor>();
  expectedTargets.add(dataNodes[0]);
  expectedTargets.add(dataNodes[4]);
  List<DatanodeDescriptor> favouredNodes =
      new ArrayList<DatanodeDescriptor>();
  favouredNodes.add(dataNodes[3]);
  favouredNodes.add(dataNodes[0]);
  targets = chooseTarget(2, dataNodes[7], null, favouredNodes);
  assertTrue("1st Replica is incorrect",
    expectedTargets.contains(targets[0].getDatanodeDescriptor()));
  assertTrue("2nd Replica is incorrect",
    expectedTargets.contains(targets[1].getDatanodeDescriptor()));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 30, Source: TestReplicationPolicyWithNodeGroup.java
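
What makes dataNodes[3] "bad" is visible in the heartbeat: its remaining space is (MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, one block short of the write threshold, so the placement policy rejects the favored node itself and instead honors the hint by picking dataNodes[4] from the same node group.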

Example 14: testChooseNodeWithMultipleStorages1

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * Test whether the remaining space per storage is individually
 * considered.
 */
@Test
public void testChooseNodeWithMultipleStorages1() throws Exception {
  updateHeartbeatWithUsage(dataNodes[5],
      2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      (2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE) / 3, 0L,
      0L, 0L, 0, 0);

  updateHeartbeatForExtraStorage(
      2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      (2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE) / 3, 0L);

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(1, dataNodes[5],
      new ArrayList<DatanodeStorageInfo>(), null);
  assertEquals(1, targets.length);
  assertEquals(storages[4], targets[0]);

  resetHeartbeatForStorages();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 24, Source: TestReplicationPolicy.java

Example 15: testChooseNodeWithMultipleStorages2

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // import the required package/class
/**
 * Test whether all storages on the datanode are considered while
 * choosing a target to place a block.
 */
@Test
public void testChooseNodeWithMultipleStorages2() throws Exception {
  updateHeartbeatWithUsage(dataNodes[5],
      2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      (2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE) / 3, 0L,
      0L, 0L, 0, 0);

  updateHeartbeatForExtraStorage(
      2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L);

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(1, dataNodes[5],
      new ArrayList<DatanodeStorageInfo>(), null);
  assertEquals(1, targets.length);
  assertEquals(dataNodes[5], targets[0].getDatanodeDescriptor());

  resetHeartbeatForStorages();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 24, Source: TestReplicationPolicy.java
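
Read together, the last two tests pin down that remaining space is judged per storage, not per node. In testChooseNodeWithMultipleStorages1, each of dataNodes[5]'s storages (including the extra one added by updateHeartbeatForExtraStorage) has only two thirds of the space one block needs, so the expected target is storages[4] rather than any storage on dataNodes[5], even though the node's combined free space would suffice. In testChooseNodeWithMultipleStorages2, the extra storage holds exactly MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE and dataNodes[5] is chosen.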


Note: The org.apache.hadoop.hdfs.server.common.HdfsServerConstants class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.