

Java LocatedBlock.getBlock Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.LocatedBlock.getBlock. If you are wondering how LocatedBlock.getBlock is used in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.LocatedBlock.


The following presents 15 code examples of the LocatedBlock.getBlock method, sorted by popularity by default.
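
As a quick orientation before the numbered examples, here is a minimal, self-contained sketch of how LocatedBlock.getBlock is typically reached from a client: ask the NameNode for the block locations of a file, then call getBlock() on each LocatedBlock to obtain the ExtendedBlock that identifies the block cluster-wide (block pool ID, block ID, length, and generation stamp). This sketch is not taken from any of the projects below; the NameNode address localhost:8020, the class name LocatedBlockGetBlockSketch, and the path /user/example/file.txt are illustrative assumptions.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlockGetBlockSketch {
  public static void main(String[] args) throws IOException {
    // Assumed NameNode address and file path; replace with your own cluster settings.
    DFSClient client = new DFSClient(
        new InetSocketAddress("localhost", 8020), new HdfsConfiguration());
    try {
      // Ask the NameNode for the located blocks covering the first 128 MB of the file.
      LocatedBlocks blocks = client.getNamenode()
          .getBlockLocations("/user/example/file.txt", 0L, 128L * 1024 * 1024);
      for (LocatedBlock lb : blocks.getLocatedBlocks()) {
        // getBlock() returns the ExtendedBlock identifying this block in the cluster.
        ExtendedBlock block = lb.getBlock();
        System.out.println(block.getBlockName()
            + " stored on " + lb.getLocations().length + " datanode(s)");
      }
    } finally {
      client.close();
    }
  }
}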

Example 1: createAFileWithCorruptedBlockReplicas

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // corrupt some/all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);

  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestClientReportBadBlock.java

Example 2: DFSOutputStream

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: DFSOutputStream.java

Example 3: inferChecksumTypeByReading

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: DFSClient.java

Example 4: addBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
private ExtendedBlock addBlocks(String fileName, String clientName)
throws IOException {
  ExtendedBlock prevBlock = null;
  for(int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
        prevBlock, null, INodeId.GRANDFATHER_INODE_ID, null);
    prevBlock = loc.getBlock();
    for(DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
      datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
      ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
          loc.getBlock().getLocalBlock(),
          ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
          .getBlock().getBlockPoolId(), report);
    }
  }
  return prevBlock;
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: NNThroughputBenchmark.java

Example 5: triggerFailure

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/**
 * go to each block on the 2nd DataNode until it fails...
 * @param path
 * @param size
 * @throws IOException
 */
private void triggerFailure(String path, long size) throws IOException {
  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> locatedBlocks =
    nn.getBlockLocations(path, 0, size).getLocatedBlocks();
  
  for (LocatedBlock lb : locatedBlocks) {
    DatanodeInfo dinfo = lb.getLocations()[1];
    ExtendedBlock b = lb.getBlock();
    try {
      accessBlock(dinfo, lb);
    } catch (IOException e) {
      System.out.println("Failure triggered, on block: " + b.getBlockId() +  
          "; corresponding volume should be removed by now");
      break;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestDataNodeVolumeFailure.java

Example 6: makeBadBlockList

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
    goodLocatedBlock.getBlock(),
    new DatanodeInfo[] {
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
    },
    goodLocatedBlock.getStartOffset(),
    false);


  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
                           badBlocks, null, true,
                           null);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestDFSClientRetries.java

Example 7: getBestNodeDNAddrPair

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/**
 * Get the best node from which to stream the data.
 * @param block LocatedBlock, containing nodes in priority order.
 * @param ignoredNodes Do not choose nodes in this array (may be null)
 * @return The DNAddrPair of the best node.
 * @throws IOException
 */
private DNAddrPair getBestNodeDNAddrPair(LocatedBlock block,
    Collection<DatanodeInfo> ignoredNodes) throws IOException {
  DatanodeInfo[] nodes = block.getLocations();
  StorageType[] storageTypes = block.getStorageTypes();
  DatanodeInfo chosenNode = null;
  StorageType storageType = null;
  if (nodes != null) {
    for (int i = 0; i < nodes.length; i++) {
      if (!deadNodes.containsKey(nodes[i])
          && (ignoredNodes == null || !ignoredNodes.contains(nodes[i]))) {
        chosenNode = nodes[i];
        // Storage types are ordered to correspond with nodes, so use the same
        // index to get storage type.
        if (storageTypes != null && i < storageTypes.length) {
          storageType = storageTypes[i];
        }
        break;
      }
    }
  }
  if (chosenNode == null) {
    throw new IOException("No live nodes contain block " + block.getBlock() +
        " after checking nodes = " + Arrays.toString(nodes) +
        ", ignoredNodes = " + ignoredNodes);
  }
  final String dnAddr =
      chosenNode.getXferAddr(dfsClient.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
  return new DNAddrPair(chosenNode, targetAddr, storageType);
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: DFSInputStream.java

Example 8: DataStreamer

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/**
 * Construct a data streamer for appending to the last partial block
 * @param lastBlock last block of the file to be appended
 * @param stat status of the file to be appended
 * @param bytesPerChecksum number of bytes per checksum
 * @throws IOException if error occurs
 */
private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
    int bytesPerChecksum) throws IOException {
  isAppend = true;
  stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
  block = lastBlock.getBlock();
  bytesSent = block.getNumBytes();
  accessToken = lastBlock.getBlockToken();
  isLazyPersistFile = isLazyPersist(stat);
  long usedInLastBlock = stat.getLen() % blockSize;
  int freeInLastBlock = (int)(blockSize - usedInLastBlock);

  // calculate the amount of free space in the pre-existing 
  // last crc chunk
  int usedInCksum = (int)(stat.getLen() % bytesPerChecksum);
  int freeInCksum = bytesPerChecksum - usedInCksum;

  // if there is space in the last block, then we have to 
  // append to that block
  if (freeInLastBlock == blockSize) {
    throw new IOException("The last block for file " + 
        src + " is full.");
  }

  if (usedInCksum > 0 && freeInCksum > 0) {
    // if there is space in the last partial chunk, then 
    // setup in such a way that the next packet will have only 
    // one chunk that fills up the partial chunk.
    //
    computePacketChunkSize(0, freeInCksum);
    setChecksumBufSize(freeInCksum);
    appendChunk = true;
  } else {
    // if the remaining space in the block is smaller than 
    // the expected size of a packet, then create 
    // smaller size packet.
    //
    computePacketChunkSize(Math.min(dfsClient.getConf().writePacketSize, freeInLastBlock), 
        bytesPerChecksum);
  }

  // setup pipeline to append to the last block XXX retries??
  setPipeline(lastBlock);
  errorIndex = -1;   // no errors yet.
  if (nodes.length < 1) {
    throw new IOException("Unable to retrieve blocks locations " +
        " for last block " + block +
        "of file " + src);

  }
}
 
Developer: naver, Project: hadoop, Lines: 58, Source: DFSOutputStream.java

Example 9: nextBlockOutputStream

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/**
 * Open a DataOutputStream to a DataNode so that it can be written to.
 * This happens when a file is created and each time a new block is allocated.
 * Must get block ID and the IDs of the destinations from the namenode.
 * Returns the list of target datanodes.
 */
private LocatedBlock nextBlockOutputStream() throws IOException {
  LocatedBlock lb = null;
  DatanodeInfo[] nodes = null;
  StorageType[] storageTypes = null;
  int count = dfsClient.getConf().nBlockWriteRetry;
  boolean success = false;
  ExtendedBlock oldBlock = block;
  do {
    hasError = false;
    lastException.set(null);
    errorIndex = -1;
    success = false;

    DatanodeInfo[] excluded =
        excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
        .keySet()
        .toArray(new DatanodeInfo[0]);
    block = oldBlock;
    lb = locateFollowingBlock(excluded.length > 0 ? excluded : null);
    block = lb.getBlock();
    block.setNumBytes(0);
    bytesSent = 0;
    accessToken = lb.getBlockToken();
    nodes = lb.getLocations();
    storageTypes = lb.getStorageTypes();

    //
    // Connect to first DataNode in the list.
    //
    success = createBlockOutputStream(nodes, storageTypes, 0L, false);

    if (!success) {
      DFSClient.LOG.info("Abandoning " + block);
      dfsClient.namenode.abandonBlock(block, fileId, src,
          dfsClient.clientName);
      block = null;
      DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
      excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
    }
  } while (!success && --count >= 0);

  if (!success) {
    throw new IOException("Unable to create new block.");
  }
  return lb;
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: DFSOutputStream.java

Example 10: chooseDataNode

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
private DNAddrPair chooseDataNode(LocatedBlock block,
    Collection<DatanodeInfo> ignoredNodes) throws IOException {
  while (true) {
    try {
      return getBestNodeDNAddrPair(block, ignoredNodes);
    } catch (IOException ie) {
      String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
        deadNodes, ignoredNodes);
      String blockInfo = block.getBlock() + " file=" + src;
      if (failures >= dfsClient.getMaxBlockAcquireFailures()) {
        String description = "Could not obtain block: " + blockInfo;
        DFSClient.LOG.warn(description + errMsg
            + ". Throwing a BlockMissingException");
        throw new BlockMissingException(src, description,
            block.getStartOffset());
      }

      DatanodeInfo[] nodes = block.getLocations();
      if (nodes == null || nodes.length == 0) {
        DFSClient.LOG.info("No node available for " + blockInfo);
      }
      DFSClient.LOG.info("Could not obtain " + block.getBlock()
          + " from any node: " + ie + errMsg
          + ". Will get new block locations from namenode and retry...");
      try {
        // Introducing a random factor to the wait time before another retry.
        // The wait time is dependent on # of failures and a random factor.
        // At the first time of getting a BlockMissingException, the wait time
        // is a random number between 0..3000 ms. If the first retry
        // still fails, we will wait 3000 ms grace period before the 2nd retry.
        // Also at the second retry, the waiting window is expanded to 6000 ms
        // alleviating the request rate from the server. Similarly the 3rd retry
        // will wait 6000ms grace period before retry and the waiting window is
        // expanded to 9000ms. 
        final int timeWindow = dfsClient.getConf().timeWindow;
        double waitTime = timeWindow * failures +       // grace period for the last round of attempt
          timeWindow * (failures + 1) * DFSUtil.getRandom().nextDouble(); // expanding time window for each failure
        DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " IOException, will wait for " + waitTime + " msec.");
        Thread.sleep((long)waitTime);
      } catch (InterruptedException iex) {
      }
      deadNodes.clear(); //2nd option is to remove only nodes[blockId]
      openInfo();
      block = getBlockAt(block.getStartOffset());
      failures++;
      continue;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 50, Source: DFSInputStream.java

Example 11: tryRead

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
 
Developer: naver, Project: hadoop, Lines: 67, Source: TestBlockTokenWithDFS.java

Example 12: accessBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/**
 * try to access a block on a data node. If fails - throws exception
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 
 
  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
 
Developer: naver, Project: hadoop, Lines: 50, Source: TestDataNodeVolumeFailure.java

Example 13: checkBlockMetaDataInfo

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/**
 * The following test first creates a file.
 * It verifies the block information from a datanode.
 * Then, it updates the block with new information and verifies again.
 * @param useDnHostname whether DNs should connect to other DNs by hostname
 */
private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
  MiniDFSCluster cluster = null;

  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
  if (useDnHostname) {
    // Since the mini cluster only listens on the loopback we have to
    // ensure the hostname used to access DNs maps to the loopback. We
    // do this by telling the DN to advertise localhost as its hostname
    // instead of the default hostname.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
  }

  try {
    cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .checkDataNodeHostConfig(true)
      .build();
    cluster.waitActive();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
    assertTrue(dfs.exists(filepath));

    //get block info
    LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    assertTrue(datanodeinfo.length > 0);

    //connect to a data node
    DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    InterDatanodeProtocol idp = DataNodeTestUtils.createInterDatanodeProtocolProxy(
        datanode, datanodeinfo[0], conf, useDnHostname);
    
    // Stop the block scanners.
    datanode.getBlockScanner().removeAllVolumeScanners();

    //verify BlockMetaDataInfo
    ExtendedBlock b = locatedblock.getBlock();
    InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
    checkMetaInfo(b, datanode);
    long recoveryId = b.getGenerationStamp() + 1;
    idp.initReplicaRecovery(
        new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));

    //verify updateBlock
    ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
        b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
    idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(),
        newblock.getNumBytes());
    checkMetaInfo(newblock, datanode);
    
    // Verify correct null response trying to init recovery for a missing block
    ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
        b.getBlockId(), 0, 0);
    assertNull(idp.initReplicaRecovery(
        new RecoveringBlock(badBlock,
            locatedblock.getLocations(), recoveryId)));
  }
  finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Developer: naver, Project: hadoop, Lines: 73, Source: TestInterDatanodeProtocol.java

Example 14: testUpdateReplicaUnderRecovery

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/** 
 * Test  for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)} 
 * */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);

    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    //check replica
    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
        fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length). 
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
          .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(storageID != null);

  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 72, Source: TestInterDatanodeProtocol.java

Example 15: setup

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class this method depends on
/**
 * Setup a {@link MiniDFSCluster}.
 * Create a block with both {@link State#NORMAL} and {@link State#READ_ONLY_SHARED} replicas.
 */
@Before
public void setup() throws IOException, InterruptedException {
  conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  
  Configuration[] overlays = new Configuration[NUM_DATANODES];
  for (int i = 0; i < overlays.length; i++) {
    overlays[i] = new Configuration();
    if (i == RO_NODE_INDEX) {
      overlays[i].setEnum(SimulatedFSDataset.CONFIG_PROPERTY_STATE, 
          i == RO_NODE_INDEX 
            ? READ_ONLY_SHARED
            : NORMAL);
    }
  }
  
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(NUM_DATANODES)
      .dataNodeConfOverlays(overlays)
      .build();
  fs = cluster.getFileSystem();
  blockManager = cluster.getNameNode().getNamesystem().getBlockManager();
  datanodeManager = blockManager.getDatanodeManager();
  client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
                         cluster.getConfiguration(0));
  
  for (int i = 0; i < NUM_DATANODES; i++) {
    DataNode dataNode = cluster.getDataNodes().get(i);
    validateStorageState(
        BlockManagerTestUtil.getStorageReportsForDatanode(
            datanodeManager.getDatanode(dataNode.getDatanodeId())),
            i == RO_NODE_INDEX 
              ? READ_ONLY_SHARED
              : NORMAL);
  }
  
  // Create a 1 block file
  DFSTestUtil.createFile(fs, PATH, BLOCK_SIZE, BLOCK_SIZE,
                         BLOCK_SIZE, (short) 1, seed);
  
  LocatedBlock locatedBlock = getLocatedBlock();
  extendedBlock = locatedBlock.getBlock();
  block = extendedBlock.getLocalBlock();
  
  assertThat(locatedBlock.getLocations().length, is(1));
  normalDataNode = locatedBlock.getLocations()[0];
  readOnlyDataNode = datanodeManager.getDatanode(cluster.getDataNodes().get(RO_NODE_INDEX).getDatanodeId());
  assertThat(normalDataNode, is(not(readOnlyDataNode)));
  
  validateNumberReplicas(1);
  
  // Inject the block into the datanode with READ_ONLY_SHARED storage 
  cluster.injectBlocks(0, RO_NODE_INDEX, Collections.singleton(block));
  
  // There should now be 2 *locations* for the block
  // Must wait until the NameNode has processed the block report for the injected blocks
  waitForLocations(2);
}
 
Developer: naver, Project: hadoop, Lines: 63, Source: TestReadOnlySharedStorage.java


Note: The org.apache.hadoop.hdfs.protocol.LocatedBlock.getBlock method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by their developers, and copyright remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.