

Java LocatedBlock Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.LocatedBlock. If you are wondering what LocatedBlock is for, how to use it, or where to find it in real code, the curated class examples below should help.


The LocatedBlock class belongs to the org.apache.hadoop.hdfs.protocol package. The 15 code examples below demonstrate the class in context; by default they are ordered by popularity.
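
Before the examples, a quick orientation sketch may help: LocatedBlock instances typically reach client code via the NameNode's getBlockLocations call, each one pairing an ExtendedBlock with the DataNodes that hold its replicas. The path, length, and the use of DistributedFileSystem.getClient() below are illustrative assumptions, not taken from the examples on this page.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class LocatedBlockTour {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes the default filesystem is HDFS; /tmp/demo.txt is a placeholder.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path file = new Path("/tmp/demo.txt");
    // Ask the NameNode which blocks cover the first 1 MB of the file.
    List<LocatedBlock> blocks = dfs.getClient()
        .getLocatedBlocks(file.toUri().getPath(), 0, 1024 * 1024)
        .getLocatedBlocks();
    for (LocatedBlock lb : blocks) {
      System.out.println("block=" + lb.getBlock()
          + " offset=" + lb.getStartOffset()
          + " corrupt=" + lb.isCorrupt());
      for (DatanodeInfo dn : lb.getLocations()) {
        System.out.println("  replica on " + dn.getXferAddr());
      }
    }
  }
}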

Example 1: triggerFailure

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
/**
 * Read each block's replica on the 2nd DataNode until one of the reads fails.
 * @param path the file whose blocks are walked
 * @param size the file length, used to resolve block locations
 * @throws IOException
 */
private void triggerFailure(String path, long size) throws IOException {
  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> locatedBlocks =
    nn.getBlockLocations(path, 0, size).getLocatedBlocks();
  
  for (LocatedBlock lb : locatedBlocks) {
    DatanodeInfo dinfo = lb.getLocations()[1];
    ExtendedBlock b = lb.getBlock();
    try {
      accessBlock(dinfo, lb);
    } catch (IOException e) {
      System.out.println("Failure triggered, on block: " + b.getBlockId() +  
          "; corresponding volume should be removed by now");
      break;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestDataNodeVolumeFailure.java
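
The accessBlock helper used above belongs to the test class and is not shown on this page. Purely as a hypothetical stand-in, forcing a read of a given block through the normal client path might look like the sketch below; unlike the real helper presumably does, it does not pin the read to one specific DataNode.

// Hypothetical stand-in for the test's accessBlock helper: seek to the
// block's start offset and read one byte so a replica must serve data.
private void accessBlockSketch(FileSystem fs, Path path, LocatedBlock lb)
    throws IOException {
  try (FSDataInputStream in = fs.open(path)) {
    in.seek(lb.getStartOffset());
    in.read();
  }
}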

Example 2: newStreamForAppend

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, int bufferSize, Progressable progress,
    LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
    String[] favoredNodes) throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("newStreamForAppend", src);
  try {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum);
    if (favoredNodes != null && favoredNodes.length != 0) {
      out.streamer.setFavoredNodes(favoredNodes);
    }
    out.start();
    return out;
  } finally {
    scope.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: DFSOutputStream.java
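
newStreamForAppend is internal to DFSOutputStream; from user code the same path is normally reached through FileSystem.append. A minimal sketch, assuming the default filesystem is HDFS and that /tmp/demo.txt (a placeholder) already exists:

// Appending through the public API eventually reaches newStreamForAppend,
// which receives the file's last (possibly under-construction) LocatedBlock.
FileSystem fs = FileSystem.get(new Configuration());
try (FSDataOutputStream out = fs.append(new Path("/tmp/demo.txt"))) {
  out.writeBytes("one more line\n");
}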

Example 3: makeBadBlockList

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
    goodLocatedBlock.getBlock(),
    new DatanodeInfo[] {
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
    },
    goodLocatedBlock.getStartOffset(),
    false);

  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
                           badBlocks, null, true,
                           null);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestDFSClientRetries.java

Example 4: testMissingBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
/** Test to ensure metrics reflect missing blocks */
@Test
public void testMissingBlock() throws Exception {
  // Create a file with single block with two replicas
  Path file = getTestPath("testMissingBlocks");
  createFile(file, 100, (short)1);
  
  // Corrupt the only replica of the block to result in a missing block
  LocatedBlock block = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  updateMetrics();
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("UnderReplicatedBlocks", 1L, rb);
  assertGauge("MissingBlocks", 1L, rb);
  assertGauge("MissingReplOneBlocks", 1L, rb);
  fs.delete(file, true);
  waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestNameNodeMetrics.java

Example 5: addBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
private ExtendedBlock addBlocks(String fileName, String clientName)
throws IOException {
  ExtendedBlock prevBlock = null;
  for(int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
        prevBlock, null, INodeId.GRANDFATHER_INODE_ID, null);
    prevBlock = loc.getBlock();
    for(DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
      datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
      ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
          loc.getBlock().getLocalBlock(),
          ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
          .getBlock().getBlockPoolId(), report);
    }
  }
  return prevBlock;
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: NNThroughputBenchmark.java

Example 6: getFromOneDataNode

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
    final LocatedBlock block, final long start, final long end,
    final ByteBuffer bb,
    final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
    final int hedgedReadId) {
  final Span parentSpan = Trace.currentSpan();
  return new Callable<ByteBuffer>() {
    @Override
    public ByteBuffer call() throws Exception {
      byte[] buf = bb.array();
      int offset = bb.position();
      TraceScope scope =
          Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
      try {
        actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
            corruptedBlockMap);
        return bb;
      } finally {
        scope.close();
      }
    }
  };
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: DFSInputStream.java
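
The hedged-read machinery above only runs when the client enables it. A minimal configuration sketch using the standard client settings (the values here are arbitrary):

// A thread pool for speculative reads, plus the wait threshold after which
// a second DataNode is tried in parallel with the first.
Configuration conf = new Configuration();
conf.setInt("dfs.client.hedged.read.threadpool.size", 10);
conf.setLong("dfs.client.hedged.read.threshold.millis", 500);
FileSystem fs = FileSystem.get(conf);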

Example 7: reportCheckSumFailure

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
/**
 * DFSInputStream reports a checksum failure.
 * Case I: the client has tried multiple DataNodes and at least one of the
 * attempts has succeeded. We report the other failures as corrupted blocks
 * to the NameNode.
 * Case II: the client has tried all DataNodes and every attempt failed. We
 * report only if the total number of replicas is 1; otherwise we stay
 * silent, since the failures may be due to the client itself being unable
 * to read.
 * @param corruptedBlockMap map of corrupted blocks
 * @param dataNodeCount number of DataNodes that hold replicas of the block
 */
private void reportCheckSumFailure(
    Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap, 
    int dataNodeCount) {
  if (corruptedBlockMap.isEmpty()) {
    return;
  }
  Iterator<Entry<ExtendedBlock, Set<DatanodeInfo>>> it = corruptedBlockMap
      .entrySet().iterator();
  Entry<ExtendedBlock, Set<DatanodeInfo>> entry = it.next();
  ExtendedBlock blk = entry.getKey();
  Set<DatanodeInfo> dnSet = entry.getValue();
  if (((dnSet.size() < dataNodeCount) && (dnSet.size() > 0))
      || ((dataNodeCount == 1) && (dnSet.size() == dataNodeCount))) {
    DatanodeInfo[] locs = new DatanodeInfo[dnSet.size()];
    int i = 0;
    for (DatanodeInfo dn:dnSet) {
      locs[i++] = dn;
    }
    LocatedBlock [] lblocks = { new LocatedBlock(blk, locs) };
    dfsClient.reportChecksumFailure(src, lblocks);
  }
  corruptedBlockMap.clear();
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: DFSInputStream.java
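
The reporting condition above is easy to misread, so here it is restated as a small predicate (a sketch, with failed standing in for dnSet.size()):

// Report when at least one DataNode succeeded (0 < failed < total), or when
// the block has exactly one replica and that single replica failed.
static boolean shouldReportCorruption(int failed, int dataNodeCount) {
  return (failed > 0 && failed < dataNodeCount)
      || (dataNodeCount == 1 && failed == dataNodeCount);
}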

Example 8: connectToDN

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: DFSClient.java

Example 9: convertToVolumeBlockLocations

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
/**
 * Helper method to combine a list of {@link LocatedBlock} with associated
 * {@link VolumeId} information to form a list of
 * {@link BlockStorageLocation}.
 */
static BlockStorageLocation[] convertToVolumeBlockLocations(
    List<LocatedBlock> blocks, 
    Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
  // Construct the final return value of VolumeBlockLocation[]
  BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
  List<BlockStorageLocation> volumeBlockLocs = 
      new ArrayList<BlockStorageLocation>(locations.length);
  for (int i = 0; i < locations.length; i++) {
    LocatedBlock locBlock = blocks.get(i);
    List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
    BlockStorageLocation bsLoc = new BlockStorageLocation(locations[i], 
        volumeIds.toArray(new VolumeId[0]));
    volumeBlockLocs.add(bsLoc);
  }
  return volumeBlockLocs.toArray(new BlockStorageLocation[] {});
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: BlockStorageLocationUtil.java
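
In Hadoop 2.x this helper backed DistributedFileSystem#getFileBlockStorageLocations (an API that was later removed). A usage sketch, assuming dfs.datanode.hdfs-blocks-metadata.enabled has been set to true on the DataNodes and that dfs is a DistributedFileSystem handle:

// Resolve which disk (VolumeId) each replica of the file's blocks lives on.
FileStatus status = dfs.getFileStatus(new Path("/tmp/demo.txt"));
BlockLocation[] locs = dfs.getFileBlockLocations(status, 0, status.getLen());
BlockStorageLocation[] storageLocs =
    dfs.getFileBlockStorageLocations(Arrays.asList(locs));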

Example 10: getAdditionalBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
/**
 * The client would like to obtain an additional block for the indicated
 * filename (which is being written to). Return a LocatedBlock identifying
 * the block plus a set of machines. The first machine on the list is where
 * the client should write data; subsequent items in the list must be
 * provided in the connection to that first datanode.
 *
 * Makes sure the previous blocks have been reported by datanodes and are
 * replicated. On a retry, simply returns the previously allocated block.
 */
LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
    ExtendedBlock previous, Set<Node> excludedNodes, 
    List<String> favoredNodes) throws IOException {
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  DatanodeStorageInfo targets[] = getNewBlockTargets(src, fileId,
      clientName, previous, excludedNodes, favoredNodes, onRetryBlock);
  if (targets == null) {
    assert onRetryBlock[0] != null : "Retry block is null";
    // This is a retry. Just return the last block.
    return onRetryBlock[0];
  }
  LocatedBlock newBlock = storeAllocatedBlock(
      src, fileId, clientName, previous, targets);
  return newBlock;
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: FSNamesystem.java
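
On the client side, the returned LocatedBlock seeds the write pipeline: the first location is the DataNode the client streams to, and the remaining locations are handed to that node as downstream targets. A hedged sketch of that consumption (locatedBlock here stands for the value returned above):

// The first location heads the write pipeline; the rest are forwarded to it
// when the client opens the block write connection.
DatanodeInfo[] nodes = locatedBlock.getLocations();
DatanodeInfo pipelineHead = nodes[0];
DatanodeInfo[] downstream = Arrays.copyOfRange(nodes, 1, nodes.length);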

Example 11: addBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
    String[] favoredNodes)
    throws IOException {
  checkNNStartup();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
        + " fileId=" + fileId + " for " + clientName);
  }
  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<Node>(excludedNodes.length);
    for (Node node : excludedNodes) {
      excludedNodesSet.add(node);
    }
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);
  LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
      clientName, previous, excludedNodesSet, favoredNodesList);
  if (locatedBlock != null)
    metrics.incrAddBlockOps();
  return locatedBlock;
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: NameNodeRpcServer.java

Example 12: generateBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
private static ExtendedBlock[][] generateBlocks(Suite s, long size
    ) throws IOException, InterruptedException, TimeoutException {
  final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
  for(int n = 0; n < s.clients.length; n++) {
    final long fileLen = size/s.replication;
    createFile(s, n, fileLen);

    final List<LocatedBlock> locatedBlocks = s.clients[n].getBlockLocations(
        FILE_NAME, 0, fileLen).getLocatedBlocks();

    final int numOfBlocks = locatedBlocks.size();
    blocks[n] = new ExtendedBlock[numOfBlocks];
    for(int i = 0; i < numOfBlocks; i++) {
      final ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[n][i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
          b.getNumBytes(), b.getGenerationStamp());
    }
  }
  return blocks;
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestBalancerWithMultipleNameNodes.java

Example 13: testPlacement

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
private void testPlacement(String clientMachine,
    String clientRack) throws IOException {
  // write 5 files and check the block placement each time
  for (int i = 0; i < 5; i++) {
    String src = "/test-" + i;
    // Create the file with client machine
    HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
        clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
        REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, false);
    LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
        null, null, fileStatus.getFileId(), null);

    assertEquals("Block should be allocated sufficient locations",
        REPLICATION_FACTOR, locatedBlock.getLocations().length);
    if (clientRack != null) {
      assertEquals("First datanode should be rack local", clientRack,
          locatedBlock.getLocations()[0].getNetworkLocation());
    }
    nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(),
        src, clientMachine);
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestDefaultBlockPlacementPolicy.java

Example 14: createClientDatanodeProtocolProxy

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout,
    boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  
  // Since we're creating a new UserGroupInformation here, we know that no
  // future RPC proxies will be able to re-use the same connection. And
  // usages of this proxy tend to be one-off calls.
  //
  // This is a temporary fix: callers should really achieve this by using
  // RPC.stopProxy() on the resulting object, but this is currently not
  // working in trunk. See the discussion on HDFS-1965.
  Configuration confWithNoIpcIdle = new Configuration(conf);
  confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
      .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

  UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
  ticket.addToken(locatedBlock.getBlockToken());
  return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: ClientDatanodeProtocolTranslatorPB.java

Example 15: addBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the dependent package/class
@Override
public AddBlockResponseProto addBlock(RpcController controller,
    AddBlockRequestProto req) throws ServiceException {
  
  try {
    List<DatanodeInfoProto> excl = req.getExcludeNodesList();
    List<String> favor = req.getFavoredNodesList();
    LocatedBlock result = server.addBlock(
        req.getSrc(),
        req.getClientName(),
        req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
        (excl == null || excl.size() == 0) ? null : PBHelper.convert(excl
            .toArray(new DatanodeInfoProto[excl.size()])), req.getFileId(),
        (favor == null || favor.size() == 0) ? null : favor
            .toArray(new String[favor.size()]));
    return AddBlockResponseProto.newBuilder()
        .setBlock(PBHelper.convert(result)).build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: ClientNamenodeProtocolServerSideTranslatorPB.java


Note: the org.apache.hadoop.hdfs.protocol.LocatedBlock class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.