

Java VersionInfo.getVersion Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.VersionInfo.getVersion. If you are wondering what VersionInfo.getVersion does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.util.VersionInfo.


The following presents 15 code examples of the VersionInfo.getVersion method, sorted by popularity by default.
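Before the examples, here is a minimal, self-contained sketch of calling the method directly. The class name VersionCheck is a hypothetical name used only for illustration; getVersion, getBuildVersion, and getDate are the same VersionInfo methods the examples below rely on.

import org.apache.hadoop.util.VersionInfo;

public class VersionCheck {
  public static void main(String[] args) {
    // Version string of the Hadoop build on the classpath, e.g. "2.7.0"
    System.out.println("Hadoop version: " + VersionInfo.getVersion());
    // Build metadata reported alongside the version string
    System.out.println("Build version:  " + VersionInfo.getBuildVersion());
    System.out.println("Built on:       " + VersionInfo.getDate());
  }
}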

Example 1: ClusterInfo

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
public ClusterInfo(ResourceManager rm) {
  long ts = ResourceManager.getClusterTimeStamp();

  this.id = ts;
  this.state = rm.getServiceState();
  this.haState = rm.getRMContext().getHAServiceState();
  this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
      .getName();
  this.startedOn = ts;
  this.resourceManagerVersion = YarnVersionInfo.getVersion();
  this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
  this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
  this.hadoopVersion = VersionInfo.getVersion();
  this.hadoopBuildVersion = VersionInfo.getBuildVersion();
  this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: ClusterInfo.java

Example 2: checkNNVersion

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source file: BPServiceActor.java

Example 3: register

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source file: NNThroughputBenchmark.java

Example 4: NodeInfo

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
public NodeInfo(final Context context, final ResourceView resourceView) {

    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.vmemCheckEnabled = resourceView.isVmemCheckEnabled();
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.pmemCheckEnabled = resourceView.isPmemCheckEnabled();
    this.totalVCoresAllocatedContainers = resourceView
        .getVCoresAllocatedForContainers();
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();

    this.healthReport = context.getNodeHealthStatus().getHealthReport();

    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
    this.nmStartupTime = NodeManager.getNMStartupTime();
  }
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 27, Source file: NodeInfo.java

Example 5: createBPRegistration

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
  StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
  if (storageInfo == null) {
    // it's null in the case of SimulatedDataSet
    storageInfo = new StorageInfo(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
        NodeType.DATA_NODE);
  }

  DatanodeID dnId = new DatanodeID(
      streamingAddr.getAddress().getHostAddress(), hostName, 
      storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
          infoSecurePort, getIpcPort());
  return new DatanodeRegistration(dnId, storageInfo, 
      new ExportedBlockKeys(), VersionInfo.getVersion());
}
 
Developer ID: yncxcw, Project: FlexMap, Lines of code: 22, Source file: DataNode.java

Example 6: transferBlocks

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][],
                            String targetStorageIDs[][]
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      String targetStorageID = targetStorageIDs[i][t];
      DatanodeRegistration receivedDNReg;
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          targetStorageID, rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
 
Developer ID: yncxcw, Project: FlexMap, Lines of code: 31, Source file: NNThroughputBenchmark.java

Example 7: register

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"), "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""), new ExportedBlockKeys(),
      VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {new StorageBlockReport(storage,
      BlockReport.builder(NUM_BUCKETS).build())};
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 21, Source file: NNThroughputBenchmark.java

Example 8: NodeInfo

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
public NodeInfo(final Context context, final ResourceView resourceView) {

    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.vmemCheckEnabled = resourceView.isVmemCheckEnabled();
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.pmemCheckEnabled = resourceView.isPmemCheckEnabled();
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();

    this.healthReport = context.getNodeHealthStatus().getHealthReport();

    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 24, Source file: NodeInfo.java

Example 9: transferBlocks

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][] 
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      DatanodeRegistration receivedDNReg;
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo, dnInfo.getStorageID()),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          receivedDNReg.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 29, Source file: NNThroughputBenchmark.java

Example 10: NodeInfo

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
public NodeInfo(final Context context, final ResourceView resourceView) {

    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.vmemCheckEnabled = resourceView.isVmemCheckEnabled();
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.pmemCheckEnabled = resourceView.isPmemCheckEnabled();
    this.totalVCoresAllocatedContainers = resourceView
        .getVCoresAllocatedForContainers();
    this.totalGCoresAllocatedContainers = resourceView
        .getGCoresAllocatedForContainers();
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();

    this.healthReport = context.getNodeHealthStatus().getHealthReport();

    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: NodeInfo.java

Example 11: getMajorVersion

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
/**
 * Get the Hadoop major version number.
 * 
 * @return The major version number of Hadoop.
 */
public String getMajorVersion() {
  String vers = VersionInfo.getVersion();

  String[] parts = vers.split("\\.");
  if (parts.length < 2) {
    throw new RuntimeException("Unable to parse Hadoop version: "
        + vers + " (expected X.Y.* format)");
  }
  return parts[0];

}
 
Developer ID: jianglibo, Project: gora-boot, Lines of code: 17, Source file: HadoopShimFactory.java
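The same split-based parsing can be carried one step further to the minor component. The snippet below is a hedged, self-contained sketch under that assumption; the class HadoopVersionParts and the helper getMinorVersion are hypothetical names, not part of the Gora shim API.

import org.apache.hadoop.util.VersionInfo;

public class HadoopVersionParts {
  /** Hypothetical helper: returns the minor version component, e.g. "7" for "2.7.3". */
  public static String getMinorVersion() {
    String vers = VersionInfo.getVersion();
    String[] parts = vers.split("\\.");
    if (parts.length < 2) {
      throw new RuntimeException("Unable to parse Hadoop version: "
          + vers + " (expected X.Y.* format)");
    }
    return parts[1];
  }
}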

Example 12: skipIfHadoopVersionIsNotAppropriate

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
/**
 * Skips all tests if the Hadoop version doesn't match.
 * We can't run this test class until HDFS-9213 is fixed which allows a secure DataNode
 * to bind to non-privileged ports for testing.
 * For now, we skip this test class until Hadoop version 3.x.x.
 */
private static void skipIfHadoopVersionIsNotAppropriate() {
	// Skips all tests if the Hadoop version doesn't match
	String hadoopVersionString = VersionInfo.getVersion();
	String[] split = hadoopVersionString.split("\\.");
	if (split.length != 3) {
		throw new IllegalStateException("Hadoop version was not of format 'X.X.X': " + hadoopVersionString);
	}
	Assume.assumeTrue(
		// check whether we're running Hadoop version >= 3.x.x
		Integer.parseInt(split[0]) >= 3
	);
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 19, Source file: RollingSinkSecuredITCase.java
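Instead of splitting the version string by hand, the same gate can be expressed with org.apache.hadoop.util.VersionUtil.compareVersions, which Examples 2 and 14 also use. The following is a small illustrative sketch, not Flink code; the class and method names here are assumptions.

import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;
import org.junit.Assume;

public class HadoopVersionAssumption {
  /** Hypothetical JUnit helper: skip the calling test unless Hadoop >= minimumVersion. */
  public static void skipIfHadoopOlderThan(String minimumVersion) {
    String hadoopVersion = VersionInfo.getVersion();
    Assume.assumeTrue(
        VersionUtil.compareVersions(hadoopVersion, minimumVersion) >= 0);
  }
}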

Example 13: testMkdirsFailsForExistingFile

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
/**
 * This test needs to be skipped for earlier Hadoop versions because those
 * have a bug.
 */
@Override
public void testMkdirsFailsForExistingFile() throws Exception {
	final String versionString = VersionInfo.getVersion();
	final String prefix = versionString.substring(0, 3);
	final float version = Float.parseFloat(prefix);
	Assume.assumeTrue("Cannot execute this test on Hadoop prior to 2.8", version >= 2.8f);

	super.testMkdirsFailsForExistingFile();
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 14, Source file: HadoopLocalFileSystemBehaviorTest.java

Example 14: checkNNVersion

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive =
        new IncorrectVersionException(minimumNameNodeVersion, nnVersion,
            "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }
  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }

  if (HdfsConstants.LAYOUT_VERSION != nsInfo.getLayoutVersion()) {
    LOG.warn("DataNode and NameNode layout versions must be the same." +
        " Expected: " + HdfsConstants.LAYOUT_VERSION +
        " actual " + nsInfo.getLayoutVersion());
    throw new IncorrectVersionException(nsInfo.getLayoutVersion(),
        "namenode");
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 28, Source file: BPServiceActor.java

Example 15: getVersionTable

import org.apache.hadoop.util.VersionInfo; // import the package/class required by the method
/** Return a table containing version information. */
public static String getVersionTable() {
  return "<div class='dfstable'><table>"       
      + "\n  <tr><td class='col1'>Version:</td><td>" + VersionInfo.getVersion() + ", " + VersionInfo.getRevision() + "</td></tr>"
      + "\n  <tr><td class='col1'>Compiled:</td><td>" + VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch() + "</td></tr>"
      + "\n</table></div>";
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 8, Source file: JspHelper.java


Note: The org.apache.hadoop.util.VersionInfo.getVersion examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; consult the corresponding project's license before distributing or using it. Do not reproduce this article without permission.