Java DatanodeCommand Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand. If you have been wondering what exactly DatanodeCommand does, how to use it, or where to find working examples, the curated class examples below should help.


The DatanodeCommand class belongs to the org.apache.hadoop.hdfs.server.protocol package. Fifteen code examples of the class are shown below, sorted by popularity.
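
Before diving into the examples, a minimal sketch of how a datanode might consume the DatanodeCommand[] returned by a heartbeat may help frame what follows. This is an illustrative sketch only: the class and method names are hypothetical, not Hadoop API, though the action constants used in the switch do come from DatanodeProtocol.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

// Illustrative sketch: a datanode-side dispatch loop over the commands
// returned by a heartbeat. Names are hypothetical; only the constants
// (DNA_TRANSFER, DNA_FINALIZE) are real DatanodeProtocol members.
class CommandDispatchSketch {
  void processCommands(DatanodeCommand[] cmds) throws IOException {
    if (cmds == null) {
      return; // the NameNode may have no work for this datanode
    }
    for (DatanodeCommand cmd : cmds) {
      switch (cmd.getAction()) {
      case DatanodeProtocol.DNA_TRANSFER:
        // replicate blocks to other datanodes (see Examples 6, 9, 11, 12)
        break;
      case DatanodeProtocol.DNA_FINALIZE:
        // finalize a previous upgrade (see Example 15)
        break;
      default:
        break; // other commands: invalidate, recover, key update, ...
      }
    }
  }
}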

Example 1: handleHeartbeat

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time passed since the last datanode 
 * heartbeat then request an immediate block report.  
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary) throws IOException {
  readLock();
  try {
    //get datanode commands
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
        xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary);
    
    //create ha status
    final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
        haContext.getState().getServiceState(),
        getFSImage().getLastAppliedOrWrittenTxId());

    return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo);
  } finally {
    readUnlock();
  }
}
 
Developer: naver; Project: hadoop; Lines: 35; Source: FSNamesystem.java
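
Two details here are easy to miss: the method runs under the namesystem read lock only, keeping heartbeat handling cheap and non-blocking for namespace writes, and maxTransfer throttles new replication work by subtracting the transfers already in flight (xmitsInProgress) from the configured maximum number of replication streams.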

Example 2: cacheReport

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
@Override
public DatanodeCommand cacheReport(DatanodeRegistration registration,
    String poolId, List<Long> blockIds) throws IOException {
  CacheReportRequestProto.Builder builder =
      CacheReportRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  for (Long blockId : blockIds) {
    builder.addBlocks(blockId);
  }
  
  CacheReportResponseProto resp;
  try {
    resp = rpcProxy.cacheReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  if (resp.hasCmd()) {
    return PBHelper.convert(resp.getCmd());
  }
  return null;
}
 
Developer: naver; Project: hadoop; Lines: 23; Source: DatanodeProtocolClientSideTranslatorPB.java

Example 3: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
 
Developer: naver; Project: hadoop; Lines: 21; Source: PBHelper.java

Example 4: cacheReport

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
@Override
public CacheReportResponseProto cacheReport(RpcController controller,
    CacheReportRequestProto request) throws ServiceException {
  DatanodeCommand cmd = null;
  try {
    cmd = impl.cacheReport(
        PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(),
        request.getBlocksList());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  CacheReportResponseProto.Builder builder =
      CacheReportResponseProto.newBuilder();
  if (cmd != null) {
    builder.setCmd(PBHelper.convert(cmd));
  }
  return builder.build();
}
 
Developer: naver; Project: hadoop; Lines: 20; Source: DatanodeProtocolServerSideTranslatorPB.java
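
Examples 2 and 4 are the two halves of the same RPC. The client-side translator (Example 2) runs on the datanode and packs the cacheReport arguments into a CacheReportRequestProto; the server-side translator above unpacks the request on the NameNode and packs any resulting DatanodeCommand into the response. The command is optional in both directions, which is why one side checks resp.hasCmd() and the other guards with cmd != null.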

Example 5: sendHeartbeat

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node.
 * Ignore reply commands.
 */
void sendHeartbeat() throws IOException {
  // register datanode
  // TODO:FEDERATION currently a single block pool is supported
  StorageReport[] rep = { new StorageReport(storage, false,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep,
      0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
      }
    }
  }
}
 
Developer: naver; Project: hadoop; Lines: 20; Source: NNThroughputBenchmark.java

Example 6: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: naver; Project: hadoop; Lines: 23; Source: NNThroughputBenchmark.java

Example 7: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  case BlockECRecoveryCommand:
    return PBHelper.convert(proto.getBlkECRecoveryCmd());
  default:
    return null;
  }
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 23; Source: PBHelper.java
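
Compared with Example 3, this newer variant of PBHelper.convert adds a BlockECRecoveryCommand branch. That case appears to come from the HDFS erasure-coding work, which introduced a dedicated command type for reconstructing striped block groups.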

Example 8: sendHeartbeat

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node.
 * Ignore reply commands.
 */
void sendHeartbeat() throws IOException {
  // register datanode
  // TODO:FEDERATION currently a single block pool is supported
  StorageReport[] rep = { new StorageReport(storage, false,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration, rep,
      0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
      }
    }
  }
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 20; Source: NNThroughputBenchmark.java
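
Note the extra trailing true argument compared with Example 5: in this newer protocol the sendHeartbeat signature gained a boolean, which appears to be the requestFullBlockReportLease flag introduced along with block report leases, letting a datanode ask permission to send a full block report.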

Example 9: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 23; Source: NNThroughputBenchmark.java

Example 10: blockReport

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports, BlockReportContext context)
      throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  
  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
    long[] blocks = r.getBlocks();
    for (int i = 0; i < blocks.length; i++) {
      reportBuilder.addBlocks(blocks[i]);
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
 
Developer: Nextzero; Project: hadoop-2.6.0-cdh5.4.3; Lines: 27; Source: DatanodeProtocolClientSideTranslatorPB.java

Example 11: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: rhli; Project: hadoop-EAR; Lines: 19; Source: NNThroughputBenchmark.java

Example 12: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: rhli; Project: hadoop-EAR; Lines: 21; Source: NNThroughputBenchmark.java

Example 13: sendHeartbeatNew

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
public DatanodeCommand[] sendHeartbeatNew(DatanodeRegistration registration,
                                     long capacity,
                                     long dfsUsed, long remaining,
                                     long namespaceUsed,
                                     int xmitsInProgress,
                                     int xceiverCount) throws IOException {
  DatanodeCommand[] cmds = super.sendHeartbeat(
          registration, capacity, dfsUsed, remaining, namespaceUsed,
          xmitsInProgress, xceiverCount);

  if (standbySafeMode != null
      && standbySafeMode.reportHeartBeat(registration)) {
    LOG.info("Sending Clear Primary command to : " + registration);
    cmds = addCommand(cmds, AvatarDatanodeCommand.CLEARPRIMARY);
  } else if (ignoreDatanodes()) {
    cmds = addCommand(cmds, AvatarDatanodeCommand.BACKOFF);
  } else if (standbySafeMode != null &&
      standbySafeMode.getPrepareFailover()) {
    cmds = addCommand(cmds, AvatarDatanodeCommand.PREPAREFAILOVER);
  } 
  return cmds;
}
 
Developer: rhli; Project: hadoop-EAR; Lines: 23; Source: AvatarNode.java
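
The addCommand helper called above is not part of this snippet. Judging only from the call sites, a plausible reconstruction appends one command to a possibly-null array; the following is a sketch under that assumption, not the actual AvatarNode source.

import java.util.Arrays;

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;

// Hypothetical reconstruction of the addCommand helper used above:
// append one command to a possibly-null command array.
static DatanodeCommand[] addCommand(DatanodeCommand[] cmds,
    DatanodeCommand cmd) {
  if (cmds == null) {
    return new DatanodeCommand[] { cmd };
  }
  DatanodeCommand[] result = Arrays.copyOf(cmds, cmds.length + 1);
  result[cmds.length] = cmd;
  return result;
}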

Example 14: blockReportNew

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
public DatanodeCommand blockReportNew(DatanodeRegistration nodeReg, BlockReport rep) throws IOException {
  if (runInfo.shutdown || !runInfo.isRunning) {
    return null;
  }
  if (ignoreDatanodes()) {
    LOG.info("Standby fell behind. Telling " + nodeReg.toString() +
              " to back off");
    // Do not process block reports yet as the ingest thread is catching up
    return AvatarDatanodeCommand.BACKOFF;
  }
  
  if (currentAvatar == Avatar.STANDBY) {
    Collection<Block> failed = super.blockReportWithRetries(nodeReg, rep);

    // standby should send only DNA_RETRY
    BlockCommand bCmd = new BlockCommand(DatanodeProtocols.DNA_RETRY,
        failed.toArray(new Block[failed.size()]));
    return bCmd;
  } else {
    // only the primary can send DNA_FINALIZE
    return super.blockReport(nodeReg, rep);
  }
}
 
Developer: rhli; Project: hadoop-EAR; Lines: 24; Source: AvatarNode.java

Example 15: blockReport

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the required package/class
@Override
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
                                   long[] blocks) throws IOException {
  verifyRequest(nodeReg);
  myMetrics.numBlockReport.inc();
  BlockListAsLongs blist = new BlockListAsLongs(blocks);
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
           +"from "+nodeReg.getName()+" "+blist.getNumberOfBlocks() +" blocks");
  }

  namesystem.processReport(nodeReg, blist);
  if (getFSImage().isUpgradeFinalized())
    return DatanodeCommand.FINALIZE;
  return null;
}
 
Developer: rhli; Project: hadoop-EAR; Lines: 17; Source: NameNode.java
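
This last example comes from a much older protocol generation: blockReport still takes a raw long[] of block data, and DatanodeCommand.FINALIZE is a shared static command instance that tells every reporting datanode to finalize its upgrade once the NameNode image is finalized. Later versions (Examples 10 and 14) replace both the flat array and the static constant with richer report and command types.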


Note: The org.apache.hadoop.hdfs.server.protocol.DatanodeCommand class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community open-source projects; copyright of the source code remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce without permission.