

Java ReplicaRecoveryInfo.getGenerationStamp Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo.getGenerationStamp. If you are unsure what ReplicaRecoveryInfo.getGenerationStamp does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo.


The following shows 9 code examples of ReplicaRecoveryInfo.getGenerationStamp, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
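
As a primer before the examples, here is a minimal, self-contained sketch of the recurring pattern: during block recovery, a replica reported by a DataNode is only kept as a sync candidate if its generation stamp is at least the block's and it holds data. This is an illustrative sketch, not code from any of the projects below; it assumes hadoop-hdfs is on the classpath, and the block id, length, and generation stamp are made-up values.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

public class GenerationStampCheck {
  public static void main(String[] args) {
    // The block under recovery, as the coordinating DataNode sees it
    // (made-up id/length/generation stamp, for illustration only).
    Block block = new Block(1001L, 1024L, 5L);
    // Replica info a DataNode would report from initReplicaRecovery.
    ReplicaRecoveryInfo info =
        new ReplicaRecoveryInfo(1001L, 1024L, 5L, ReplicaState.FINALIZED);

    // The filter used in Examples 1, 3, 6 and 8: replicas with a stale
    // generation stamp or zero bytes are excluded from the sync list.
    boolean candidate = info.getGenerationStamp() >= block.getGenerationStamp()
        && info.getNumBytes() > 0;
    System.out.println("candidate for recovery sync: " + candidate);
  }
}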

Example 1: recoverBlock

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the package/class this method depends on
/** Recover a block */
private void recoverBlock(RecoveringBlock rBlock) throws IOException {
  ExtendedBlock block = rBlock.getBlock();
  String blockPoolId = block.getBlockPoolId();
  DatanodeID[] datanodeids = rBlock.getLocations();
  List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
  int errorCount = 0;

  //check generation stamps
  for(DatanodeID id : datanodeids) {
    try {
      BPOfferService bpos = blockPoolManager.get(blockPoolId);
      DatanodeRegistration bpReg = bpos.bpRegistration;
      InterDatanodeProtocol datanode = bpReg.equals(id)?
          this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
              dnConf.socketTimeout, dnConf.connectToDnViaHostname);
      ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
      if (info != null &&
          info.getGenerationStamp() >= block.getGenerationStamp() &&
          info.getNumBytes() > 0) {
        syncList.add(new BlockRecord(id, datanode, info));
      }
    } catch (RecoveryInProgressException ripE) {
      InterDatanodeProtocol.LOG.warn(
          "Recovery for replica " + block + " on data-node " + id
          + " is already in progress. Recovery id = "
          + rBlock.getNewGenerationStamp() + " is aborted.", ripE);
      return;
    } catch (IOException e) {
      ++errorCount;
      InterDatanodeProtocol.LOG.warn(
          "Failed to obtain replica info for block (=" + block 
          + ") from datanode (=" + id + ")", e);
    }
  }

  if (errorCount == datanodeids.length) {
    throw new IOException("All datanodes failed: block=" + block
        + ", datanodeids=" + Arrays.asList(datanodeids));
  }

  syncBlock(rBlock, syncList);
}
 
Developer: naver, Project: hadoop, Lines: 44, Source: DataNode.java

Example 2: testUpdateReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the package/class this method depends on
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);

    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    //check replica
    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
        fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length). 
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
          .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(storageID != null);

  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 72, Source: TestInterDatanodeProtocol.java

Example 3: recover

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the package/class this method depends on
protected void recover() throws IOException {
  List<BlockRecord> syncList = new ArrayList<>(locs.length);
  int errorCount = 0;
  int candidateReplicaCnt = 0;

  // Check generation stamps, replica size and state. Replica must satisfy
  // the following criteria to be included in syncList for recovery:
  // - Valid generation stamp
  // - Non-zero length
  // - Original state is RWR or better
  for(DatanodeID id : locs) {
    try {
      DatanodeID bpReg = new DatanodeID(
          datanode.getBPOfferService(bpid).bpRegistration);
      InterDatanodeProtocol proxyDN = bpReg.equals(id)?
          datanode: DataNode.createInterDataNodeProtocolProxy(id, conf,
          dnConf.socketTimeout, dnConf.connectToDnViaHostname);
      ReplicaRecoveryInfo info = callInitReplicaRecovery(proxyDN, rBlock);
      if (info != null &&
          info.getGenerationStamp() >= block.getGenerationStamp() &&
          info.getNumBytes() > 0) {
        // Count the number of candidate replicas received.
        ++candidateReplicaCnt;
        if (info.getOriginalReplicaState().getValue() <=
            ReplicaState.RWR.getValue()) {
          syncList.add(new BlockRecord(id, proxyDN, info));
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Block recovery: Ignored replica with invalid " +
                "original state: " + info + " from DataNode: " + id);
          }
        }
      } else {
        if (LOG.isDebugEnabled()) {
          if (info == null) {
            LOG.debug("Block recovery: DataNode: " + id + " does not have "
                + "replica for block: " + block);
          } else {
            LOG.debug("Block recovery: Ignored replica with invalid "
                + "generation stamp or length: " + info + " from " +
                "DataNode: " + id);
          }
        }
      }
    } catch (RecoveryInProgressException ripE) {
      InterDatanodeProtocol.LOG.warn(
          "Recovery for replica " + block + " on data-node " + id
              + " is already in progress. Recovery id = "
              + rBlock.getNewGenerationStamp() + " is aborted.", ripE);
      return;
    } catch (IOException e) {
      ++errorCount;
      InterDatanodeProtocol.LOG.warn(
          "Failed to obtain replica info for block (=" + block
              + ") from datanode (=" + id + ")", e);
    }
  }

  if (errorCount == locs.length) {
    throw new IOException("All datanodes failed: block=" + block
        + ", datanodeids=" + Arrays.asList(locs));
  }

  // None of the replicas reported by DataNodes has the required original
  // state, report the error.
  if (candidateReplicaCnt > 0 && syncList.isEmpty()) {
    throw new IOException("Found " + candidateReplicaCnt +
        " replica(s) for block " + block + " but none is in " +
        ReplicaState.RWR.name() + " or better state. datanodeids=" +
        Arrays.asList(locs));
  }

  syncBlock(syncList);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 75, Source: BlockRecoveryWorker.java
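
Example 3 goes beyond the generation-stamp check and also requires the replica's original state to be RWR or better. A hedged sketch of that comparison, again assuming hadoop-hdfs on the classpath: ReplicaState values are ordered FINALIZED(0), RBW(1), RWR(2), RUR(3), TEMPORARY(4), so "RWR or better" translates to getValue() <= ReplicaState.RWR.getValue().

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

public class ReplicaStateOrdering {
  public static void main(String[] args) {
    for (ReplicaState s : ReplicaState.values()) {
      // Lower values are "better": FINALIZED and RBW replicas also pass.
      boolean rwrOrBetter = s.getValue() <= ReplicaState.RWR.getValue();
      System.out.println(s + " (value=" + s.getValue()
          + ") eligible for recovery: " + rwrOrBetter);
    }
  }
}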

Example 4: testUpdateReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the package/class this method depends on
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);

    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    //check replica
    final Replica replica =
        cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length). 
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
          .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(storageID != null);

  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 72, Source: TestInterDatanodeProtocol.java

Example 5: testUpdateReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the package/class this method depends on
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);

    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    //check replica
    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
        fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length). 
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
          .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
    assertTrue(storageID != null);

  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 70, Source: TestInterDatanodeProtocol.java

Example 6: recoverBlock

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the package/class this method depends on
/**
 * Recover a block
 */
private void recoverBlock(RecoveringBlock rBlock) throws IOException {
  ExtendedBlock block = rBlock.getBlock();
  String blockPoolId = block.getBlockPoolId();
  DatanodeID[] datanodeids = rBlock.getLocations();
  List<BlockRecord> syncList = new ArrayList<>(datanodeids.length);
  int errorCount = 0;

  //check generation stamps
  for (DatanodeID id : datanodeids) {
    try {
      BPOfferService bpos = blockPoolManager.get(blockPoolId);
      DatanodeRegistration bpReg = bpos.bpRegistration;
      InterDatanodeProtocol datanode = bpReg.equals(id) ? this : DataNode
          .createInterDataNodeProtocolProxy(id, getConf(),
              dnConf.socketTimeout, dnConf.connectToDnViaHostname);
      ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
      if (info != null &&
          info.getGenerationStamp() >= block.getGenerationStamp() &&
          info.getNumBytes() > 0) {
        syncList.add(new BlockRecord(id, datanode, info));
      }
    } catch (RecoveryInProgressException ripE) {
      InterDatanodeProtocol.LOG.warn(
          "Recovery for replica " + block + " on data-node " + id +
              " is already in progress. Recovery id = " +
              rBlock.getNewGenerationStamp() + " is aborted.", ripE);
      return;
    } catch (IOException e) {
      ++errorCount;
      InterDatanodeProtocol.LOG.warn(
          "Failed to obtain replica info for block (=" + block +
              ") from datanode (=" + id + ")", e);
    }
  }

  if (errorCount == datanodeids.length) {
    throw new IOException(
        "All datanodes failed: block=" + block + ", datanodeids=" +
            Arrays.asList(datanodeids));
  }

  syncBlock(rBlock, syncList);
}
 
Developer: hopshadoop, Project: hops, Lines: 47, Source: DataNode.java

Example 7: testUpdateReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the package/class this method depends on
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long,
 * long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    //create a file
    DistributedFileSystem dfs =
        (DistributedFileSystem) cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);

    //get block info
    final LocatedBlock locatedblock =
        getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(),
            filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode =
        cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);

    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset =
        DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset
        .initReplicaRecovery(new RecoveringBlock(b, null, recoveryid));

    //check replica
    final ReplicaInfo replica =
        FsDatasetTestUtil.fetchReplicaInfo(fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length). 
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp =
          new ExtendedBlock(b.getBlockPoolId(), rri.getBlockId(),
              rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
        Assert.fail();
      } catch (IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
    assertTrue(storageID != null);

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 78, Source: TestInterDatanodeProtocol.java

Example 8: recoverBlock

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the package/class this method depends on
/** Recover a block */
private void recoverBlock(RecoveringBlock rBlock) throws IOException {
  Block block = rBlock.getBlock();
  DatanodeInfo[] targets = rBlock.getLocations();
  DatanodeID[] datanodeids = (DatanodeID[])targets;
  List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
  int errorCount = 0;

  //check generation stamps
  for(DatanodeID id : datanodeids) {
    try {
      InterDatanodeProtocol datanode = dnRegistration.equals(id)?
          this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
              socketTimeout);
      ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
      if (info != null &&
          info.getGenerationStamp() >= block.getGenerationStamp() &&
          info.getNumBytes() > 0) {
        syncList.add(new BlockRecord(id, datanode, info));
      }
    } catch (RecoveryInProgressException ripE) {
      InterDatanodeProtocol.LOG.warn(
          "Recovery for replica " + block + " on data-node " + id
          + " is already in progress. Recovery id = "
          + rBlock.getNewGenerationStamp() + " is aborted.", ripE);
      return;
    } catch (IOException e) {
      ++errorCount;
      InterDatanodeProtocol.LOG.warn(
          "Failed to obtain replica info for block (=" + block 
          + ") from datanode (=" + id + ")", e);
    }
  }

  if (errorCount == datanodeids.length) {
    throw new IOException("All datanodes failed: block=" + block
        + ", datanodeids=" + Arrays.asList(datanodeids));
  }

  syncBlock(rBlock, syncList);
}
 
Developer: cumulusyebl, Project: cumulus, Lines: 42, Source: DataNode.java

Example 9: testUpdateReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the package/class this method depends on
/** Test {@link FSDataset#updateReplicaUnderRecovery(ReplicaUnderRecovery, long, long)} */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    //create a file
    DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        dfs.getClient().getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);
    Assert.assertTrue(datanode.data instanceof FSDataset);
    final FSDataset fsdataset = (FSDataset)datanode.data;

    //initReplicaRecovery
    final Block b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    //check replica
    final ReplicaInfo replica = fsdataset.fetchReplicaInfo(b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    FSDataset.checkReplicaFiles(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length). 
    {
      //create a block with same id and gs but different length.
      final Block tmp = new Block(rri.getBlockId(), rri.getNumBytes() - 1,
          rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final ReplicaInfo finalized = fsdataset.updateReplicaUnderRecovery(
        rri, recoveryid, newlength);

    //check meta data after update
    FSDataset.checkReplicaFiles(finalized);
    Assert.assertEquals(b.getBlockId(), finalized.getBlockId());
    Assert.assertEquals(recoveryid, finalized.getGenerationStamp());
    Assert.assertEquals(newlength, finalized.getNumBytes());

  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
 
Developer: cumulusyebl, Project: cumulus, Lines: 72, Source: TestInterDatanodeProtocol.java


Note: The org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo.getGenerationStamp examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Refer to each project's license before distributing or using the code. Do not reproduce without permission.