

Java LocatedBlock.getLocations Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.LocatedBlock.getLocations. If you are asking yourself how LocatedBlock.getLocations works, how to call it, or where to find examples of it in use, the curated samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.LocatedBlock.


The following presents 15 code examples of the LocatedBlock.getLocations method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
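Before the examples, here is a minimal, self-contained sketch of the typical call pattern: ask the NameNode for a file's LocatedBlocks, then read each block's replica locations with getLocations(). This is a sketch under assumptions, not code from the examples below: it assumes a Hadoop 2.x client, that fs.defaultFS points at a running HDFS cluster, and the path /tmp/example.txt is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ListBlockLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS NameNode.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path file = new Path("/tmp/example.txt"); // hypothetical path

    // Ask the NameNode for the block list covering the whole file.
    LocatedBlocks blocks = dfs.getClient()
        .getLocatedBlocks(file.toUri().getPath(), 0L, Long.MAX_VALUE);

    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
      // getLocations() returns the DataNodes holding replicas of this block,
      // ordered by proximity to the client.
      DatanodeInfo[] locs = lb.getLocations();
      System.out.println(lb.getBlock().getBlockName() + " -> "
          + locs.length + " replica(s)");
      for (DatanodeInfo dn : locs) {
        System.out.println("  " + dn.getXferAddr());
      }
    }
  }
}

Run against a live cluster, this prints each block name followed by the transfer addresses of the DataNodes that hold its replicas.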

Example 1: createAFileWithCorruptedBlockReplicas

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // Corrupt some or all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestClientReportBadBlock.java

Example 2: addBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
private ExtendedBlock addBlocks(String fileName, String clientName)
    throws IOException {
  ExtendedBlock prevBlock = null;
  for(int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
        prevBlock, null, INodeId.GRANDFATHER_INODE_ID, null);
    prevBlock = loc.getBlock();
    for(DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
      datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
      ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
          loc.getBlock().getLocalBlock(),
          ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
          .getBlock().getBlockPoolId(), report);
    }
  }
  return prevBlock;
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: NNThroughputBenchmark.java

Example 3: triggerFailure

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
/**
 * Access each block's replica on the second DataNode until one fails.
 * @param path the file whose blocks are probed
 * @param size the file length in bytes
 * @throws IOException
 */
private void triggerFailure(String path, long size) throws IOException {
  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> locatedBlocks =
    nn.getBlockLocations(path, 0, size).getLocatedBlocks();
  
  for (LocatedBlock lb : locatedBlocks) {
    DatanodeInfo dinfo = lb.getLocations()[1];
    ExtendedBlock b = lb.getBlock();
    try {
      accessBlock(dinfo, lb);
    } catch (IOException e) {
      System.out.println("Failure triggered, on block: " + b.getBlockId() +  
          "; corresponding volume should be removed by now");
      break;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestDataNodeVolumeFailure.java

Example 4: countNNBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
/**
 * Count the datanodes that hold copies of a file's blocks and
 * record the per-block counts in the given map.
 * @param map block id to location-count map
 * @param path the file whose block locations are counted
 * @param size the file length in bytes
 * @return the total number of replica locations across all blocks
 * @throws IOException
 */
private int countNNBlocks(Map<String, BlockLocs> map, String path, long size) 
  throws IOException {
  int total = 0;
  
  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> locatedBlocks =
      nn.getBlockLocations(path, 0, size).getLocatedBlocks();

  for (LocatedBlock lb : locatedBlocks) {
    String blockId = "" + lb.getBlock().getBlockId();
    DatanodeInfo[] dn_locs = lb.getLocations();
    BlockLocs bl = map.get(blockId);
    if (bl == null) {
      bl = new BlockLocs();
    }
    total += dn_locs.length;
    bl.num_locs += dn_locs.length;
    map.put(blockId, bl);
  }
  return total;
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestDataNodeVolumeFailure.java

Example 5: getBestNodeDNAddrPair

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
/**
 * Get the best node from which to stream the data.
 * @param block LocatedBlock, containing nodes in priority order.
 * @param ignoredNodes Do not choose nodes in this array (may be null)
 * @return The DNAddrPair of the best node.
 * @throws IOException
 */
private DNAddrPair getBestNodeDNAddrPair(LocatedBlock block,
    Collection<DatanodeInfo> ignoredNodes) throws IOException {
  DatanodeInfo[] nodes = block.getLocations();
  StorageType[] storageTypes = block.getStorageTypes();
  DatanodeInfo chosenNode = null;
  StorageType storageType = null;
  if (nodes != null) {
    for (int i = 0; i < nodes.length; i++) {
      if (!deadNodes.containsKey(nodes[i])
          && (ignoredNodes == null || !ignoredNodes.contains(nodes[i]))) {
        chosenNode = nodes[i];
        // Storage types are ordered to correspond with nodes, so use the same
        // index to get storage type.
        if (storageTypes != null && i < storageTypes.length) {
          storageType = storageTypes[i];
        }
        break;
      }
    }
  }
  if (chosenNode == null) {
    throw new IOException("No live nodes contain block " + block.getBlock() +
        " after checking nodes = " + Arrays.toString(nodes) +
        ", ignoredNodes = " + ignoredNodes);
  }
  final String dnAddr =
      chosenNode.getXferAddr(dfsClient.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
  return new DNAddrPair(chosenNode, targetAddr, storageType);
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: DFSInputStream.java

Example 6: compare

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
private void compare(LocatedBlock expected, LocatedBlock actual) {
  assertEquals(expected.getBlock(), actual.getBlock());
  compare(expected.getBlockToken(), actual.getBlockToken());
  assertEquals(expected.getStartOffset(), actual.getStartOffset());
  assertEquals(expected.isCorrupt(), actual.isCorrupt());
  DatanodeInfo [] ei = expected.getLocations();
  DatanodeInfo [] ai = actual.getLocations();
  assertEquals(ei.length, ai.length);
  for (int i = 0; i < ei.length ; i++) {
    compare(ei[i], ai[i]);
  }
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestPBHelper.java

Example 7: convert

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
public static LocatedBlockProto convert(LocatedBlock b) {
  if (b == null) return null;
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  List<DatanodeInfo> cachedLocs =
      Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
  for (int i = 0; i < locs.length; i++) {
    DatanodeInfo loc = locs[i];
    builder.addLocs(i, PBHelper.convert(loc));
    boolean locIsCached = cachedLocs.contains(loc);
    builder.addIsCached(locIsCached);
    if (locIsCached) {
      cachedLocs.remove(loc);
    }
  }
  Preconditions.checkArgument(cachedLocs.size() == 0,
      "Found additional cached replica locations that are not in the set of"
      + " storage-backed locations!");

  StorageType[] storageTypes = b.getStorageTypes();
  if (storageTypes != null) {
    for (int i = 0; i < storageTypes.length; ++i) {
      builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i]));
    }
  }
  final String[] storageIDs = b.getStorageIDs();
  if (storageIDs != null) {
    builder.addAllStorageIDs(Arrays.asList(storageIDs));
  }

  return builder.setB(PBHelper.convert(b.getBlock()))
      .setBlockToken(PBHelper.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: PBHelper.java

Example 8: waitForBlockReplication

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
private void waitForBlockReplication(String filename, 
                                     ClientProtocol namenode,
                                     int expected, long maxWaitSec) 
                                     throws IOException {
  long start = Time.monotonicNow();
  
  //wait for all the blocks to be replicated;
  LOG.info("Checking for block replication for " + filename);
  
  LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
  assertEquals(numBlocks, blocks.locatedBlockCount());
  
  for (int i = 0; i < numBlocks; ++i) {
    LOG.info("Checking for block:" + (i+1));
    while (true) { // loop to check block i (usually once block 0 is done, the rest are done)
      blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
      assertEquals(numBlocks, blocks.locatedBlockCount());
      LocatedBlock block = blocks.get(i);
      int actual = block.getLocations().length;
      if ( actual == expected ) {
        LOG.info("Got enough replicas for " + (i+1) + "th block " + block.getBlock() +
            ", got " + actual + ".");
        break;
      }
      LOG.info("Not enough replicas for " + (i+1) + "th block " + block.getBlock() +
                             " yet. Expecting " + expected + ", got " + 
                             actual + ".");
    
      if (maxWaitSec > 0 && 
          (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timedout while waiting for all blocks to " +
                              " be replicated for " + filename);
      }
    
      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {}
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: TestInjectionForSimulatedStorage.java

Example 9: reorderBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
    throws IOException {

  ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, src);
  if (sn == null) {
    // It's not a WAL
    return;
  }

  // OK, so it's a WAL
  String hostName = sn.getHostname();
  if (LOG.isTraceEnabled()) {
    LOG.trace(src +
        " is a WAL file, so reordering blocks; last hostname will be: " + hostName);
  }

  // Just check for all blocks
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    DatanodeInfo[] dnis = lb.getLocations();
    if (dnis != null && dnis.length > 1) {
      boolean found = false;
      for (int i = 0; i < dnis.length - 1 && !found; i++) {
        if (hostName.equals(dnis[i].getHostName())) {
          // advance the other locations by one and put this one at the last place.
          DatanodeInfo toLast = dnis[i];
          System.arraycopy(dnis, i + 1, dnis, i, dnis.length - i - 1);
          dnis[dnis.length - 1] = toLast;
          found = true;
        }
      }
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: HFileSystem.java

Example 10: getBlocksOnRack

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
private Set<ExtendedBlock> getBlocksOnRack(List<LocatedBlock> blks, String rack) {
  Set<ExtendedBlock> ret = new HashSet<ExtendedBlock>();
  for (LocatedBlock blk : blks) {
    for (DatanodeInfo di : blk.getLocations()) {
      if (rack.equals(NetworkTopology.getFirstHalf(di.getNetworkLocation()))) {
        ret.add(blk.getBlock());
        break;
      }
    }
  }
  return ret;
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestBalancerWithNodeGroup.java

Example 11: nextBlockOutputStream

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
/**
 * Open a DataOutputStream to a DataNode so that it can be written to.
 * This happens when a file is created and each time a new block is allocated.
 * Must get block ID and the IDs of the destinations from the namenode.
 * Returns the list of target datanodes.
 */
private LocatedBlock nextBlockOutputStream() throws IOException {
  LocatedBlock lb = null;
  DatanodeInfo[] nodes = null;
  StorageType[] storageTypes = null;
  int count = dfsClient.getConf().nBlockWriteRetry;
  boolean success = false;
  ExtendedBlock oldBlock = block;
  do {
    hasError = false;
    lastException.set(null);
    errorIndex = -1;
    success = false;

    DatanodeInfo[] excluded =
        excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
        .keySet()
        .toArray(new DatanodeInfo[0]);
    block = oldBlock;
    lb = locateFollowingBlock(excluded.length > 0 ? excluded : null);
    block = lb.getBlock();
    block.setNumBytes(0);
    bytesSent = 0;
    accessToken = lb.getBlockToken();
    nodes = lb.getLocations();
    storageTypes = lb.getStorageTypes();

    //
    // Connect to first DataNode in the list.
    //
    success = createBlockOutputStream(nodes, storageTypes, 0L, false);

    if (!success) {
      DFSClient.LOG.info("Abandoning " + block);
      dfsClient.namenode.abandonBlock(block, fileId, src,
          dfsClient.clientName);
      block = null;
      DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
      excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
    }
  } while (!success && --count >= 0);

  if (!success) {
    throw new IOException("Unable to create new block.");
  }
  return lb;
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: DFSOutputStream.java

Example 12: fetchLocatedBlocksAndGetLastBlockLength

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
private long fetchLocatedBlocksAndGetLastBlockLength() throws IOException {
  final LocatedBlocks newInfo = dfsClient.getLocatedBlocks(src, 0);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("newInfo = " + newInfo);
  }
  if (newInfo == null) {
    throw new IOException("Cannot open filename " + src);
  }

  if (locatedBlocks != null) {
    Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocks().iterator();
    Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator();
    while (oldIter.hasNext() && newIter.hasNext()) {
      if (! oldIter.next().getBlock().equals(newIter.next().getBlock())) {
        throw new IOException("Blocklist for " + src + " has changed!");
      }
    }
  }
  locatedBlocks = newInfo;
  long lastBlockBeingWrittenLength = 0;
  if (!locatedBlocks.isLastBlockComplete()) {
    final LocatedBlock last = locatedBlocks.getLastLocatedBlock();
    if (last != null) {
      if (last.getLocations().length == 0) {
        if (last.getBlockSize() == 0) {
          // if the length is zero, then no data has been written to
          // datanode. So no need to wait for the locations.
          return 0;
        }
        return -1;
      }
      final long len = readBlockLength(last);
      last.getBlock().setNumBytes(len);
      lastBlockBeingWrittenLength = len; 
    }
  }

  fileEncryptionInfo = locatedBlocks.getFileEncryptionInfo();

  return lastBlockBeingWrittenLength;
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: DFSInputStream.java

Example 13: chooseDataNode

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
private DNAddrPair chooseDataNode(LocatedBlock block,
    Collection<DatanodeInfo> ignoredNodes) throws IOException {
  while (true) {
    try {
      return getBestNodeDNAddrPair(block, ignoredNodes);
    } catch (IOException ie) {
      String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
        deadNodes, ignoredNodes);
      String blockInfo = block.getBlock() + " file=" + src;
      if (failures >= dfsClient.getMaxBlockAcquireFailures()) {
        String description = "Could not obtain block: " + blockInfo;
        DFSClient.LOG.warn(description + errMsg
            + ". Throwing a BlockMissingException");
        throw new BlockMissingException(src, description,
            block.getStartOffset());
      }

      DatanodeInfo[] nodes = block.getLocations();
      if (nodes == null || nodes.length == 0) {
        DFSClient.LOG.info("No node available for " + blockInfo);
      }
      DFSClient.LOG.info("Could not obtain " + block.getBlock()
          + " from any node: " + ie + errMsg
          + ". Will get new block locations from namenode and retry...");
      try {
        // Introduce a random factor into the wait time before another retry.
        // The wait time depends on the number of failures plus a random factor.
        // On the first BlockMissingException the wait is a random number
        // between 0 and 3000 ms. If the first retry still fails, we wait a
        // 3000 ms grace period before the second retry, and the waiting
        // window expands to 6000 ms to ease the request rate on the server.
        // Likewise, the third retry waits a 6000 ms grace period and the
        // window then expands to 9000 ms.
        final int timeWindow = dfsClient.getConf().timeWindow;
        double waitTime = timeWindow * failures +  // grace period for the last round of attempts
            timeWindow * (failures + 1) * DFSUtil.getRandom().nextDouble(); // expanding window per failure
        DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1)
            + " IOException, will wait for " + waitTime + " msec.");
        Thread.sleep((long)waitTime);
      } catch (InterruptedException iex) {
      }
      deadNodes.clear(); //2nd option is to remove only nodes[blockId]
      openInfo();
      block = getBlockAt(block.getStartOffset());
      failures++;
      continue;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 50, Source: DFSInputStream.java

Example 14: Pipeline

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
Pipeline(LocatedBlock lb) {
  for(DatanodeInfo d : lb.getLocations()) {
    datanodes.add(d.getName());
  }
}
 
Developer: naver, Project: hadoop, Lines: 6, Source: Pipeline.java

Example 15: getDataNode

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the class this method depends on
/**
 * Get a DataNode that serves our testBlock.
 */
public DataNode getDataNode(LocatedBlock testBlock) {
  DatanodeInfo[] nodes = testBlock.getLocations();
  int ipcport = nodes[0].getIpcPort();
  return cluster.getDataNode(ipcport);
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: BlockReaderTestUtil.java


Note: The org.apache.hadoop.hdfs.protocol.LocatedBlock.getLocations examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not repost without permission.