当前位置: 首页>>代码示例>>Java>>正文


Java ClientDatanodeProtocol.copyBlock方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol.copyBlock方法的典型用法代码示例。如果您正苦于以下问题:Java ClientDatanodeProtocol.copyBlock方法的具体用法?Java ClientDatanodeProtocol.copyBlock怎么用?Java ClientDatanodeProtocol.copyBlock使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol的用法示例。


在下文中一共展示了ClientDatanodeProtocol.copyBlock方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: copyBlock

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; //导入方法依赖的package包/类
/**
 * Thrift-facing wrapper that asks a datanode to copy a block to a target
 * datanode, converting Thrift DTOs into HDFS protocol objects and issuing
 * the {@link ClientDatanodeProtocol#copyBlock} RPC.
 *
 * @param datanode        datanode that should perform the copy (RPC endpoint)
 * @param srcNamespaceId  namespace of the source block
 * @param srcblock        source block (id / length / generation stamp)
 * @param dstNamespaceId  namespace of the destination block
 * @param destblock       destination block (id / length / generation stamp)
 * @param target          datanode that should receive the copied block
 * @param asynchronous    if true the datanode RPC returns before the copy completes
 * @throws ThriftIOException if the underlying RPC fails with an IOException
 * @throws TException        on Thrift transport errors
 */
public void copyBlock(TDatanodeID datanode,
                      ThdfsNamespaceId srcNamespaceId, ThdfsBlock srcblock,
                      ThdfsNamespaceId dstNamespaceId, ThdfsBlock destblock,
                      TDatanodeID target, boolean asynchronous)
                      throws ThriftIOException, TException {
  Block sblk = new Block(srcblock.blockId, srcblock.numBytes,
                         srcblock.generationStamp);
  Block dblk = new Block(destblock.blockId, destblock.numBytes,
                         destblock.generationStamp);
  // Only the hostname is known from the Thrift request; port is resolved via
  // getPort(), storageID and infoPort are left as placeholders ("", -1).
  DatanodeInfo targs = new DatanodeInfo(
       new DatanodeID(target.name, "", -1, getPort(target.name)));

  // make RPC to datanode
  try {
    ClientDatanodeProtocol remote = getOrCreate(datanode.name);
    remote.copyBlock(srcNamespaceId.id, sblk,
                     dstNamespaceId.id, dblk,
                     targs, asynchronous);
  } catch (IOException e) {
    String msg = "Error copyBlock datanode " + datanode.name +
                 " srcnamespaceid " + srcNamespaceId.id +
                 " destnamespaceid " + dstNamespaceId.id +
                 " srcblock " + sblk +
                 " destblock " + dblk;
    // Fix: log the exception itself, not just the message — the original
    // LOG.warn(msg) silently discarded the cause and its stack trace, and
    // ThriftIOException cannot carry it across the wire.
    LOG.warn(msg, e);
    throw new ThriftIOException(msg);
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:29,代码来源:HadoopThriftDatanodeServer.java

示例2: run

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; //导入方法依赖的package包/类
/**
 * Fires off one block-fix attempt: picks a random live datanode in the
 * destination cluster as the copy target, picks a random source replica of
 * the good block, and asks that source datanode to copy the good block over
 * the bad block via an asynchronous copyBlock RPC.
 *
 * Any failure (including selection/RPC errors) is counted in metrics and
 * logged; this runnable never propagates exceptions.
 */
@Override
public void run() {

  String msg = "";
  try {
    // find a random datanode from the destination cluster
    DatanodeInfo[] targets = destFs.getClient().datanodeReport(DatanodeReportType.LIVE);
    DatanodeInfo target = targets[rand.nextInt(targets.length)];

    // find a source datanode from among the datanodes that host this block
    DatanodeInfo srcdn  = goodBlock.getLocations()[rand.nextInt(goodBlock.getLocations().length)];

    // The RPC is asynchronous, i.e. the RPC will return immediately even before the
    // physical block copy occurs from the datanode.
    msg = "File " + badfile + ": Copying block " +
          goodBlock.getBlock().getBlockName() + " from " + srcdn.getName() +
          " to block " + badBlock.getBlock().getBlockName() +
          " on " + target.getName();
    LOG.info(msg);
    ClientDatanodeProtocol datanode = createClientDatanodeProtocolProxy(srcdn, conf);
    try {
      datanode.copyBlock(goodBlock.getBlock(), badBlock.getBlock(), target);
    } finally {
      // Fix: stop the proxy even when copyBlock throws; the original leaked
      // the RPC proxy/connection on failure.
      RPC.stopProxy(datanode);
    }
    HighTideNode.getMetrics().fixSuccessfullyStarted.inc();
  } catch (Throwable e) {
    HighTideNode.getMetrics().fixFailedDatanodeError.inc();
    // Fix: pass the throwable to the logger instead of string-concatenating
    // stringifyException(e) — preserves the stack trace in structured form.
    LOG.error(msg + ". Failed to contact datanode.", e);
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:29,代码来源:FileFixer.java

示例3: copyBlockReplica

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; //导入方法依赖的package包/类
/**
 * Copies over a single replica of a block to a destination datanode.
 */
private void copyBlockReplica() {
  boolean error = false;
  try {
    // Timeout of 8 minutes for this RPC, this is sufficient since
    // PendingReplicationMonitor timeout itself is 5 minutes.
    ClientDatanodeProtocol cdp = getDatanodeConnection(srcDn, conf,
        rpcTimeout);
    LOG.debug("Fast Copy : Copying block " + src.getBlockName() + " to "
        + dst.getBlockName() + " on " + dstDn.getHostName());
    // This is a blocking call that does not return until the block is
    // successfully copied on the Datanode.
    if (supportFederation) {
      cdp.copyBlock(srcNamespaceId, src, 
          dstNamespaceId, dst, dstDn,
          false);
    } else {
      cdp.copyBlock(src, dst, dstDn,
          false);
    }
  } catch (Exception e) {
    String errMsg = "Fast Copy : Failed for Copying block "
      + src.getBlockName() + " to " + dst.getBlockName() + " on "
      + dstDn.getHostName();
    LOG.warn(errMsg, e);
    error = true;
    handleException(e);
  }
  updateBlockStatus(dst, error);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:33,代码来源:FastCopy.java

示例4: testCopyBlockAPI

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; //导入方法依赖的package包/类
/**
 * End-to-end exercise of the datanode copyBlock API: copies a source block
 * onto a freshly-allocated destination block, then verifies that a second
 * copy request for the same destination is rejected by the datanode
 * (surfaced to the client as a RemoteException).
 */
@Test
public void testCopyBlockAPI() throws Exception {
  // Generate source file and get its locations.
  String filename = "/testCopyBlockAPI";
  DFSTestUtil.createFile(fs, new Path(filename), 1023 * 10, (short) 3,
      (long) 0);
  FileStatus srcFileStatus = fs.getFileStatus(new Path(filename));
  LocatedBlocksWithMetaInfo lbkSrcMetaInfo = cluster.getNameNode()
      .openAndFetchMetaInfo(filename, 0, Long.MAX_VALUE);
  int srcNamespaceId = lbkSrcMetaInfo.getNamespaceID();
  LocatedBlock lbkSrc = lbkSrcMetaInfo.getLocatedBlocks().get(0);
  DatanodeInfo[] srcLocs = lbkSrc.getLocations();

  // Create destination file and add a single block.
  String newFile = "/testCopyBlockAPI_new";
  String clientName = newFile;
  // NOTE(review): this creates "/testCopyBlockAPInew", not newFile — looks
  // unintended (the namenode create() below is what actually creates
  // newFile); verify whether this line is needed at all.
  fs.create(new Path(filename + "new"));
  cluster.getNameNode().create(newFile, srcFileStatus.getPermission(),
      clientName, true, true, srcFileStatus.getReplication(),
      srcFileStatus.getBlockSize());
  // Pass srcLocs as favored nodes so the destination block is allocated on
  // the same datanodes as the source block.
  LocatedBlockWithMetaInfo lbkDstMetaInfo =
    cluster.getNameNode().addBlockAndFetchMetaInfo(newFile, clientName, null, srcLocs);
  int dstNamespaceId = lbkDstMetaInfo.getNamespaceID();
  LocatedBlock lbkDst = lbkDstMetaInfo;

  // Verify locations of src and destination block.
  DatanodeInfo[] dstLocs = lbkDst.getLocations();
  Arrays.sort(srcLocs);
  Arrays.sort(dstLocs);
  assertEquals(srcLocs.length, dstLocs.length);
  for (int i = 0; i < srcLocs.length; i++) {
    assertEquals(srcLocs[i], dstLocs[i]);
  }

  // Create datanode rpc connections.
  // Proxy to the third replica's datanode; used for the duplicate copy below.
  ClientDatanodeProtocol cdp2 = DFSClient.createClientDatanodeProtocolProxy(
      srcLocs[2], conf, 5 * 60 * 1000);

  Block srcBlock = new Block(lbkSrc.getBlock());
  Block dstBlock = new Block(lbkDst.getBlock());
  System.out.println("Copying src : " + srcBlock + " dst : " + dstBlock);

  // Find datanode object.
  // Locate the in-process DataNode instance whose registration for the
  // source namespace matches the first replica location.
  DataNode datanode = null;
  for (DataNode dn : cluster.getDataNodes()) {
    DatanodeRegistration registration = dn.getDNRegistrationForNS(srcNamespaceId);
    if (registration.equals(srcLocs[0])) {
      datanode = dn;
      break;
    }
  }
  
  assertNotNull(datanode);

  // Submit a block transfer to location 2.
  ExecutorService pool = Executors.newSingleThreadExecutor();
  pool.submit(datanode.new DataTransfer(new DatanodeInfo[] { srcLocs[2] }, srcNamespaceId,
        srcBlock, dstNamespaceId, dstBlock, datanode));

  try {
    // Give the first (async) transfer time to start/complete on the datanode.
    // NOTE(review): fixed 5s sleep is timing-sensitive; a flaky run is
    // possible if the transfer has not begun yet.
    Thread.sleep(5000);
    // Submit another transfer to same location, should receive
    // BlockAlreadyExistsException.
    cdp2.copyBlock(srcNamespaceId, srcBlock, dstNamespaceId, dstBlock, srcLocs[2], false);
  } catch (RemoteException re) {
    // pass.
    return;
  } finally {
    // Shutdown RPC connections.
    RPC.stopProxy(cdp2);
  }
  fail("Second RPC did not throw Exception");
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:74,代码来源:TestCopyBlockAPI.java


注:本文中的org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol.copyBlock方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。