

Java DatanodeProtocol.DNA_TRANSFER Field Code Examples

This article collects typical usage examples of the Java field org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol.DNA_TRANSFER. If you are wondering how DatanodeProtocol.DNA_TRANSFER is used in practice, the selected examples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol.


Fifteen code examples of the DatanodeProtocol.DNA_TRANSFER field are shown below, sorted by popularity by default.
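Before the per-project examples, the sketch below distills the pattern most of them share: after a heartbeat, the datanode walks the returned DatanodeCommand array, and when a command's action equals DatanodeProtocol.DNA_TRANSFER it casts the command to BlockCommand and reads the blocks and target datanodes to replicate to. This is a minimal illustration, not code from any of the projects listed; it assumes the Hadoop HDFS server classes are on the classpath, and the class name HeartbeatCommandSketch and the handleCommands helper are hypothetical names used only here.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

/**
 * Hypothetical helper (not taken from any of the projects below): shows the
 * shared pattern of branching on DatanodeCommand.getAction() after a heartbeat.
 */
public class HeartbeatCommandSketch {

  /** Returns the number of DNA_TRANSFER commands found in the heartbeat reply. */
  static int handleCommands(DatanodeCommand[] cmds) {
    if (cmds == null) {
      return 0; // no work requested by the name-node
    }
    int transfers = 0;
    for (DatanodeCommand cmd : cmds) {
      switch (cmd.getAction()) {
      case DatanodeProtocol.DNA_TRANSFER: {
        // The name-node asks this datanode to copy blocks to other datanodes.
        BlockCommand bcmd = (BlockCommand) cmd;
        Block[] blocks = bcmd.getBlocks();            // blocks to replicate
        DatanodeInfo[][] targets = bcmd.getTargets(); // destinations per block
        // ... hand blocks/targets to the datanode's transfer logic here ...
        transfers++;
        break;
      }
      case DatanodeProtocol.DNA_INVALIDATE:
        // Blocks the name-node wants deleted; ignored in this sketch.
        break;
      default:
        // Other actions (DNA_SHUTDOWN, DNA_REGISTER, ...) are not handled here.
        break;
      }
    }
    return transfers;
  }
}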

Example 1: replicateBlocks

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: naver | Project: hadoop | Lines: 22 | Source: NNThroughputBenchmark.java

Example 2: replicateBlocks

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 22 | Source: NNThroughputBenchmark.java

Example 3: convert

public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
    case TRANSFER:
      action = DatanodeProtocol.DNA_TRANSFER;
      break;
    case INVALIDATE:
      action = DatanodeProtocol.DNA_INVALIDATE;
      break;
    case SHUTDOWN:
      action = DatanodeProtocol.DNA_SHUTDOWN;
      break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
 
Developer: hopshadoop | Project: hops | Lines: 25 | Source: PBHelper.java

Example 4: replicateBlocks

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 18 | Source: NNThroughputBenchmark.java

Example 5: replicateBlocks

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 20 | Source: NNThroughputBenchmark.java

Example 6: convert

public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
  return builder.build();
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 23 | Source: PBHelper.java

Example 7: replicateBlocks

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 21 | Source: NNThroughputBenchmark.java

Example 8: replicateBlocks

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: yncxcw | Project: FlexMap | Lines: 22 | Source: NNThroughputBenchmark.java

Example 9: replicateBlocks

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
// keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep =
      {new StorageReport(dnRegistration.getStorageID(), false, DF_CAPACITY,
          DF_USED, DF_CAPACITY - DF_USED, DF_USED)};
  DatanodeCommand[] cmds =
      nameNodeProto.sendHeartbeat(dnRegistration, rep, 0, 0, 0)
          .getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: hopshadoop | Project: hops | Lines: 24 | Source: NNThroughputBenchmark.java

Example 10: replicateBlocks

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: Seagate | Project: hadoop-on-lustre | Lines: 18 | Source: NNThroughputBenchmark.java

Example 11: convert

public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(convert(cmd.getTargets()))
         .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
  return builder.build();
}
 
Developer: Seagate | Project: hadoop-on-lustre2 | Lines: 24 | Source: PBHelper.java

Example 12: convert

public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(convert(cmd.getTargets()))
         .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
  StorageType[][] types = cmd.getTargetStorageTypes();
  if (types != null) {
    builder.addAllTargetStorageTypes(convert(types));
  }
  return builder.build();
}
 
Developer: naver | Project: hadoop | Lines: 28 | Source: PBHelper.java

Example 13: testConvertBlockCommand

@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[] { new Block(21), new Block(22) };
  DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
      new DatanodeInfo[2] };
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs = {{"s00"}, {"s10", "s11"}};
  StorageType[][] storageTypes = {{StorageType.DEFAULT},
      {StorageType.DEFAULT, StorageType.DEFAULT}};
  BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
      blocks, dnInfos, storageTypes, storageIDs);
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 32 | Source: TestPBHelper.java

Example 14: convert

public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelperClient.convert(blocks[i]));
  }
  builder.addAllTargets(PBHelperClient.convert(cmd.getTargets()))
         .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
  StorageType[][] types = cmd.getTargetStorageTypes();
  if (types != null) {
    builder.addAllTargetStorageTypes(PBHelperClient.convert(types));
  }
  return builder.build();
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 28 | Source: PBHelper.java

Example 15: processCommandFromStandby

private boolean processCommandFromStandby(DatanodeCommand cmd,
    BPServiceActor actor) throws IOException {
  if (cmd == null)
    return true;
  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action from standby: DNA_REGISTER");
    actor.reRegister();
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    LOG.info("DatanodeCommand action from standby: DNA_ACCESSKEYUPDATE");
    if (dn.isBlockTokenEnabled) {
      dn.blockPoolTokenSecretManager.addKeys(
          getBlockPoolId(), 
          ((KeyUpdateCommand) cmd).getExportedKeys());
    }
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
  case DatanodeProtocol.DNA_FINALIZE:
  case DatanodeProtocol.DNA_RECOVERBLOCK:
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    LOG.warn("Got a command from standby NN - ignoring command:" + cmd.getAction());
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  return true;
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 31 | Source: BPOfferService.java


Note: The org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol.DNA_TRANSFER examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not republish without permission.