当前位置: 首页>>代码示例>>Java>>正文


Java FinalizedReplica类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.datanode.FinalizedReplica的典型用法代码示例。如果您正苦于以下问题:Java FinalizedReplica类的具体用法?Java FinalizedReplica怎么用?Java FinalizedReplica使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


FinalizedReplica类属于org.apache.hadoop.hdfs.server.datanode包,在下文中一共展示了FinalizedReplica类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: fetchReplicaInfo

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Returns a defensive copy of the replica held in the datanode's in-memory
 * replica map. Primarily intended for testing, so callers can inspect the
 * replica without aliasing datanode state.
 *
 * @param bpid    block pool id the replica belongs to
 * @param blockId id of the block to look up
 * @return a clone of the stored replica, or null if no replica is mapped
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  final ReplicaInfo stored = volumeMap.get(bpid, blockId);
  if (stored == null) {
    return null;
  }
  // Clone via the state-specific copy constructor so the caller gets an
  // object of the same concrete type as the stored replica.
  switch (stored.getState()) {
  case FINALIZED:
    return new FinalizedReplica((FinalizedReplica) stored);
  case RBW:
    return new ReplicaBeingWritten((ReplicaBeingWritten) stored);
  case RWR:
    return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered) stored);
  case RUR:
    return new ReplicaUnderRecovery((ReplicaUnderRecovery) stored);
  case TEMPORARY:
    return new ReplicaInPipeline((ReplicaInPipeline) stored);
  }
  return null;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:FsDatasetImpl.java

示例2: testMix

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Checks the long-array encoding of a report that mixes finalized
 * replicas with replicas still under construction.
 */
@Test
public void testMix() {
  BlockListAsLongs blocks = checkReport(
      new FinalizedReplica(b1, null, null),
      new FinalizedReplica(b2, null, null),
      new ReplicaBeingWritten(b3, null, null, null),
      new ReplicaWaitingToBeRecovered(b4, null, null));
  // Expected encoding: counts, then the finalized entries, a -1,-1,-1
  // separator, then the under-construction entries with their state value.
  long[] expected = new long[] {
      2, 2,
      1, 11, 111,
      2, 22, 222,
      -1, -1, -1,
      3, 33, 333, ReplicaState.RBW.getValue(),
      4, 44, 444, ReplicaState.RWR.getValue() };
  assertArrayEquals(expected, blocks.getBlockListAsLongs());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:TestBlockListAsLongs.java

示例3: testFuzz

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Fuzz-tests block report encoding with 100k randomly generated replicas
 * spread across the FINALIZED, RBW and RWR states (fixed seed, so the
 * run is deterministic).
 *
 * @throws InterruptedException propagated from checkReport
 */
@Test
public void testFuzz() throws InterruptedException {
  Replica[] replicas = new Replica[100000];
  Random rand = new Random(0);
  for (int i = 0; i < replicas.length; i++) {
    Block b = new Block(rand.nextLong(), i, i << 4);
    // BUG FIX: the bound must be 3, not 2. Random.nextInt(2) only ever
    // returns 0 or 1, so the "case 2" branch below was unreachable and
    // ReplicaWaitingToBeRecovered replicas were never exercised.
    switch (rand.nextInt(3)) {
      case 0:
        replicas[i] = new FinalizedReplica(b, null, null);
        break;
      case 1:
        replicas[i] = new ReplicaBeingWritten(b, null, null, null);
        break;
      case 2:
        replicas[i] = new ReplicaWaitingToBeRecovered(b, null, null);
        break;
    }
  }
  checkReport(replicas);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:21,代码来源:TestBlockListAsLongs.java

示例4: injectCorruptReplica

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Plants an empty (and therefore corrupt) finalized block file plus its
 * meta file on the first volume, bypassing the dataset's normal write
 * path. The block must not already be known to the dataset.
 *
 * @param block block identity to fabricate a corrupt replica for
 * @throws IOException if the block or meta file already exists on disk
 */
@Override
public void injectCorruptReplica(ExtendedBlock block) throws IOException {
  Preconditions.checkState(!dataset.contains(block),
      "Block " + block + " already exists on dataset.");
  try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
    FsVolumeImpl firstVolume = (FsVolumeImpl) refs.get(0);
    FinalizedReplica finalized = new FinalizedReplica(
        block.getLocalBlock(),
        firstVolume,
        firstVolume.getFinalizedDir(block.getBlockPoolId()));
    File blockFile = finalized.getBlockFile();
    boolean blockCreated = blockFile.createNewFile();
    if (!blockCreated) {
      throw new FileExistsException(
          "Block file " + blockFile + " already exists.");
    }
    File metaFile = FsDatasetUtil.getMetaFile(blockFile, 1000);
    boolean metaCreated = metaFile.createNewFile();
    if (!metaCreated) {
      throw new FileExistsException(
          "Meta file " + metaFile + " already exists."
      );
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:24,代码来源:FsDatasetImplTestUtils.java

示例5: fetchReplicaInfo

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Returns a clone of a replica stored in data-node memory.
 * Should be primarily used for testing.
 *
 * @param bpid    block pool the block belongs to
 * @param blockId the block to clone
 * @return a copy of the mapped replica, or null when none is mapped
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  ReplicaInfo replica = volumeMap.get(bpid, blockId);
  if (replica == null) {
    return null;
  }
  // Copy through the state-specific copy constructor so the clone keeps
  // the same concrete replica type.
  final ReplicaInfo copy;
  switch (replica.getState()) {
  case FINALIZED:
    copy = new FinalizedReplica((FinalizedReplica) replica);
    break;
  case RBW:
    copy = new ReplicaBeingWritten((ReplicaBeingWritten) replica);
    break;
  case RWR:
    copy = new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered) replica);
    break;
  case RUR:
    copy = new ReplicaUnderRecovery((ReplicaUnderRecovery) replica);
    break;
  case TEMPORARY:
    copy = new ReplicaInPipeline((ReplicaInPipeline) replica);
    break;
  default:
    // All known states are handled above; mirror the original fallback.
    copy = null;
  }
  return copy;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:25,代码来源:FsDatasetImpl.java

示例6: recoverAppend

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Recovers a failed append: re-checks the replica against the expected
 * generation stamp / length, then either restarts the append (FINALIZED
 * replica) or simply bumps the generation stamp (RBW replica).
 *
 * @param b                block to recover the append for
 * @param newGS            new generation stamp to move the replica to
 * @param expectedBlockLen length the replica is expected to have
 * @return the replica now being written
 * @throws IOException if recovery checks fail
 */
@Override  // FsDatasetSpi
public synchronized ReplicaInPipeline recoverAppend(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);

  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);

  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    // RBW: only the generation stamp needs updating.
    bumpReplicaGS(replicaInfo, newGS);
    return (ReplicaBeingWritten) replicaInfo;
  }
  // FINALIZED: reopen the replica for append.
  return append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo, newGS,
      b.getNumBytes());
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:17,代码来源:FsDatasetImpl.java

示例7: fetchReplicaInfo

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Returns a clone of a replica stored in data-node memory.
 * Should be primarily used for testing.
 *
 * @param bpid    block pool id of the replica
 * @param blockId block id of the replica
 * @return a state-appropriate copy of the replica, or null if unmapped
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  ReplicaInfo source = volumeMap.get(bpid, blockId);
  if (source == null) {
    return null;
  }
  // Each state maps to a distinct concrete replica class; clone with the
  // matching copy constructor.
  switch (source.getState()) {
    case FINALIZED:
      return new FinalizedReplica((FinalizedReplica) source);
    case RBW:
      return new ReplicaBeingWritten((ReplicaBeingWritten) source);
    case RWR:
      return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered) source);
    case RUR:
      return new ReplicaUnderRecovery((ReplicaUnderRecovery) source);
    case TEMPORARY:
      return new ReplicaInPipeline((ReplicaInPipeline) source);
  }
  return null;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:27,代码来源:FsDatasetImpl.java

示例8: recoverAppend

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Recovers a failed append. After validating the replica via
 * {@code recoverCheck}, a FINALIZED replica is reopened for append while
 * an RBW replica only gets its generation stamp bumped.
 *
 * @param b                block whose append is being recovered
 * @param newGS            new generation stamp
 * @param expectedBlockLen expected length of the replica
 * @return the replica in the write pipeline
 * @throws IOException if the recovery checks fail
 */
@Override  // FsDatasetSpi
public synchronized ReplicaInPipeline recoverAppend(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);

  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);

  boolean isFinalized = replicaInfo.getState() == ReplicaState.FINALIZED;
  if (!isFinalized) {
    // RBW replica: advance the generation stamp and keep writing.
    bumpReplicaGS(replicaInfo, newGS);
    return (ReplicaBeingWritten) replicaInfo;
  }
  return append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo, newGS,
      b.getNumBytes());
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:17,代码来源:FsDatasetImpl.java

示例9: append

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Reopens a finalized replica of {@code b} for appending.
 *
 * @param b                block to append to
 * @param newGS            new generation stamp; must not be older than the
 *                         replica's current one
 * @param expectedBlockLen length the finalized replica must currently have
 * @return a handler pairing the writable replica with a reference on its
 *         volume (the reference keeps the volume from being removed while
 *         the write is in flight)
 * @throws IOException if the generation stamp is stale, the replica length
 *                     does not match, or the append itself fails
 * @throws ReplicaNotFoundException if the replica is not FINALIZED
 */
@Override  // FsDatasetSpi
public synchronized ReplicaHandler append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // If the block was successfully finalized because all packets
  // were successfully processed at the Datanode but the ack for
  // some of the packets were not received by the client. The client 
  // re-opens the connection and retries sending those packets.
  // The other reason is that an "append" is occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  // Only a finalized replica may be reopened for append.
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  // A length mismatch indicates on-disk corruption or a stale caller view.
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  // Hold a reference on the volume for the duration of the write; it is
  // released via the returned ReplicaHandler, or immediately below if the
  // append fails before the handler is constructed.
  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica = null;
  try {
    replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
        b.getNumBytes());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:38,代码来源:FsDatasetImpl.java

示例10: getFinalizedBlocks

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Get the list of finalized blocks from in-memory blockmap for a block pool.
 * Each element is a copy, so the returned list is safe to use after the
 * dataset lock is released.
 */
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocks(String bpid) {
  final ArrayList<FinalizedReplica> result =
      new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
  for (ReplicaInfo replica : volumeMap.replicas(bpid)) {
    if (replica.getState() == ReplicaState.FINALIZED) {
      result.add(new FinalizedReplica((FinalizedReplica) replica));
    }
  }
  return result;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:FsDatasetImpl.java

示例11: getFinalizedBlocksOnPersistentStorage

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Get the list of finalized blocks from in-memory blockmap for a block pool,
 * restricted to replicas on persistent (non-transient) storage. Each element
 * is a copy of the mapped replica.
 */
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid) {
  final ArrayList<FinalizedReplica> result =
      new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
  for (ReplicaInfo replica : volumeMap.replicas(bpid)) {
    if (replica.getVolume().isTransientStorage()) {
      continue; // skip replicas on transient storage
    }
    if (replica.getState() == ReplicaState.FINALIZED) {
      result.add(new FinalizedReplica((FinalizedReplica) replica));
    }
  }
  return result;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:16,代码来源:FsDatasetImpl.java

示例12: testFinalized

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Checks the long-array encoding of a report containing a single
 * finalized replica.
 */
@Test
public void testFinalized() {
  BlockListAsLongs blocks = checkReport(
      new FinalizedReplica(b1, null, null));
  long[] expected = new long[] {
      1, 0,
      1, 11, 111,
      -1, -1, -1 };
  assertArrayEquals(expected, blocks.getBlockListAsLongs());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:12,代码来源:TestBlockListAsLongs.java

示例13: testRemove

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Exercises ReplicaMap.remove: null rejection, mismatched generation
 * stamp / block id, and successful removal by Block and by block id.
 */
@Test
public void testRemove() {
  // Removing a null block must be rejected outright.
  try {
    map.remove(bpid, null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) { }
  
  // A generation-stamp mismatch must not remove anything.
  Block lookup = new Block(block);
  lookup.setGenerationStamp(0);
  assertNull(map.remove(bpid, lookup));
  
  // Nor must a block-id mismatch.
  lookup.setGenerationStamp(block.getGenerationStamp());
  lookup.setBlockId(0);
  assertNull(map.remove(bpid, lookup));
  
  // An exact match removes the replica.
  assertNotNull(map.remove(bpid, block));
  
  // Removing by an unknown block id yields nothing.
  assertNull(map.remove(bpid, 0));
  
  // Removing by a known block id succeeds.
  map.add(bpid, new FinalizedReplica(block, null, null));
  assertNotNull(map.remove(bpid, block.getBlockId()));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:29,代码来源:TestReplicaMap.java

示例14: testDuplicateReplicaResolution

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Verifies BlockPoolSlice.selectReplicaToDelete: replicas at the same
 * path are never deleted, and across different volumes the older (or
 * arbitrary-but-consistent, when equal) replica is selected.
 */
@Test
public void testDuplicateReplicaResolution() throws IOException {
  FsVolumeImpl volume1 = Mockito.mock(FsVolumeImpl.class);
  FsVolumeImpl volume2 = Mockito.mock(FsVolumeImpl.class);

  File file1 = new File("d1/block");
  File file2 = new File("d2/block");

  ReplicaInfo replicaOlder = new FinalizedReplica(1, 1, 1, volume1, file1);
  ReplicaInfo replica = new FinalizedReplica(1, 2, 2, volume1, file1);
  ReplicaInfo replicaSame = new FinalizedReplica(1, 2, 2, volume1, file1);
  ReplicaInfo replicaNewer = new FinalizedReplica(1, 3, 3, volume1, file1);

  ReplicaInfo replicaOtherOlder = new FinalizedReplica(1, 1, 1, volume2, file2);
  ReplicaInfo replicaOtherSame = new FinalizedReplica(1, 2, 2, volume2, file2);
  ReplicaInfo replicaOtherNewer = new FinalizedReplica(1, 3, 3, volume2, file2);

  // Same file path: neither replica should be chosen for deletion.
  assertNull(BlockPoolSlice.selectReplicaToDelete(replicaSame, replica));
  assertNull(BlockPoolSlice.selectReplicaToDelete(replicaOlder, replica));
  assertNull(BlockPoolSlice.selectReplicaToDelete(replicaNewer, replica));

  // Different paths: the replica that is not the latest is selected.
  assertSame(replica,
      BlockPoolSlice.selectReplicaToDelete(replicaOtherSame, replica));
  assertSame(replicaOtherOlder,
      BlockPoolSlice.selectReplicaToDelete(replicaOtherOlder, replica));
  assertSame(replica,
      BlockPoolSlice.selectReplicaToDelete(replicaOtherNewer, replica));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:31,代码来源:TestFsDatasetImpl.java

示例15: createFinalizedReplica

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; //导入依赖的package包/类
/**
 * Materializes a finalized replica for {@code block} on the given volume:
 * registers it in the dataset's replica map and creates empty block and
 * meta files on disk.
 *
 * @param volume volume to place the replica on; must be an FsVolumeImpl
 * @param block  block identity for the new replica
 * @return the replica that was registered with the dataset
 * @throws IOException if the block or meta file cannot be created (e.g.
 *                     because it already exists)
 */
@Override
public Replica createFinalizedReplica(FsVolumeSpi volume, ExtendedBlock block)
    throws IOException {
  FsVolumeImpl vol = (FsVolumeImpl) volume;
  ReplicaInfo info = new FinalizedReplica(block.getLocalBlock(), vol,
      vol.getCurrentDir().getParentFile());
  dataset.volumeMap.add(block.getBlockPoolId(), info);
  // File.createNewFile() signals failure by returning false rather than
  // throwing; the original silently ignored that, which could leave the
  // replica registered in the map with stale or missing files on disk.
  if (!info.getBlockFile().createNewFile()) {
    throw new IOException(
        "Block file " + info.getBlockFile() + " already exists.");
  }
  if (!info.getMetaFile().createNewFile()) {
    throw new IOException(
        "Meta file " + info.getMetaFile() + " already exists.");
  }
  return info;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:12,代码来源:FsDatasetImplTestUtils.java


注:本文中的org.apache.hadoop.hdfs.server.datanode.FinalizedReplica类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。