

Java LocatedBlock.getStorageTypes Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.LocatedBlock.getStorageTypes. If you are unsure what LocatedBlock.getStorageTypes does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.LocatedBlock.


The following presents 7 code examples of the LocatedBlock.getStorageTypes method, sorted by popularity by default.
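Before the examples, here is a minimal sketch of the method's core contract: the StorageType array it returns is index-aligned with the DatanodeInfo array from getLocations(), so element i describes the storage medium holding replica i; as Examples 3 and 4 below show, the array may also be null or shorter than the location list, so callers guard accordingly. The helper below is a hypothetical illustration (printReplicaStorageTypes is not a name from any example here), assuming Hadoop 2.6+ where StorageType lives in org.apache.hadoop.fs:

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

/** Hypothetical helper: print each replica location alongside its storage type. */
static void printReplicaStorageTypes(LocatedBlock lb) {
  DatanodeInfo[] locs = lb.getLocations();
  // Index-aligned with locs; guard against null/short arrays, as Example 3 does.
  StorageType[] types = lb.getStorageTypes();
  for (int i = 0; i < locs.length; i++) {
    StorageType t = (types != null && i < types.length) ? types[i] : null;
    System.out.println(locs[i].getXferAddr() + " -> " + t);
  }
}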

Example 1: verifyFile

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class the method depends on
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestStorageMover.java

Example 2: getOrVerifyReplication

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class the method depends on
private Replication getOrVerifyReplication(Path file, Replication expected)
    throws IOException {
  final List<LocatedBlock> lbs = dfs.getClient().getLocatedBlocks(
      file.toString(), 0).getLocatedBlocks();
  Assert.assertEquals(1, lbs.size());

  LocatedBlock lb = lbs.get(0);
  StringBuilder types = new StringBuilder(); 
  final Replication r = new Replication();
  for(StorageType t : lb.getStorageTypes()) {
    types.append(t).append(", ");
    if (t == StorageType.DISK) {
      r.disk++;
    } else if (t == StorageType.ARCHIVE) {
      r.archive++;
    } else {
      Assert.fail("Unexpected storage type " + t);
    }
  }

  if (expected != null) {
    final String s = "file = " + file + "\n  types = [" + types + "]";
    Assert.assertEquals(s, expected, r);
  }
  return r;
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestStorageMover.java

Example 3: getBestNodeDNAddrPair

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class the method depends on
/**
 * Get the best node from which to stream the data.
 * @param block LocatedBlock, containing nodes in priority order.
 * @param ignoredNodes Do not choose nodes in this collection (may be null)
 * @return The DNAddrPair of the best node.
 * @throws IOException
 */
private DNAddrPair getBestNodeDNAddrPair(LocatedBlock block,
    Collection<DatanodeInfo> ignoredNodes) throws IOException {
  DatanodeInfo[] nodes = block.getLocations();
  StorageType[] storageTypes = block.getStorageTypes();
  DatanodeInfo chosenNode = null;
  StorageType storageType = null;
  if (nodes != null) {
    for (int i = 0; i < nodes.length; i++) {
      if (!deadNodes.containsKey(nodes[i])
          && (ignoredNodes == null || !ignoredNodes.contains(nodes[i]))) {
        chosenNode = nodes[i];
        // Storage types are ordered to correspond with nodes, so use the same
        // index to get storage type.
        if (storageTypes != null && i < storageTypes.length) {
          storageType = storageTypes[i];
        }
        break;
      }
    }
  }
  if (chosenNode == null) {
    throw new IOException("No live nodes contain block " + block.getBlock() +
        " after checking nodes = " + Arrays.toString(nodes) +
        ", ignoredNodes = " + ignoredNodes);
  }
  final String dnAddr =
      chosenNode.getXferAddr(dfsClient.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
  return new DNAddrPair(chosenNode, targetAddr, storageType);
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: DFSInputStream.java

Example 4: convert

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class the method depends on
public static LocatedBlockProto convert(LocatedBlock b) {
  if (b == null) return null;
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  List<DatanodeInfo> cachedLocs =
      Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
  for (int i = 0; i < locs.length; i++) {
    DatanodeInfo loc = locs[i];
    builder.addLocs(i, PBHelper.convert(loc));
    boolean locIsCached = cachedLocs.contains(loc);
    builder.addIsCached(locIsCached);
    if (locIsCached) {
      cachedLocs.remove(loc);
    }
  }
  Preconditions.checkArgument(cachedLocs.size() == 0,
      "Found additional cached replica locations that are not in the set of"
      + " storage-backed locations!");

  StorageType[] storageTypes = b.getStorageTypes();
  if (storageTypes != null) {
    for (int i = 0; i < storageTypes.length; ++i) {
      builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i]));
    }
  }
  final String[] storageIDs = b.getStorageIDs();
  if (storageIDs != null) {
    builder.addAllStorageIDs(Arrays.asList(storageIDs));
  }

  return builder.setB(PBHelper.convert(b.getBlock()))
      .setBlockToken(PBHelper.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: PBHelper.java

Example 5: testFallbackToDiskPartial

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class the method depends on
/**
 * File partially fits in RamDisk after eviction.
 * RamDisk can fit 2 blocks. Write a file with 5 blocks.
 * Expect 2 or fewer blocks on RamDisk and 3 or more on disk.
 * @throws IOException
 */
@Test
public void testFallbackToDiskPartial()
  throws IOException, InterruptedException {
  startUpCluster(true, 2);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE * 5, true);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  triggerBlockReport();

  int numBlocksOnRamDisk = 0;
  int numBlocksOnDisk = 0;

  long fileLength = client.getFileInfo(path.toString()).getLen();
  LocatedBlocks locatedBlocks =
    client.getLocatedBlocks(path.toString(), 0, fileLength);
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    if (locatedBlock.getStorageTypes()[0] == RAM_DISK) {
      numBlocksOnRamDisk++;
    } else if (locatedBlock.getStorageTypes()[0] == DEFAULT) {
      numBlocksOnDisk++;
    }
  }

  // Since eviction is asynchronous, depending on the timing of eviction
  // wrt writes, we may get 2 or less blocks on RAM disk.
  assert(numBlocksOnRamDisk <= 2);
  assert(numBlocksOnDisk >= 3);
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestLazyPersistFiles.java

Example 6: nextBlockOutputStream

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class the method depends on
/**
 * Open a DataOutputStream to a DataNode so that it can be written to.
 * This happens when a file is created and each time a new block is allocated.
 * Must get block ID and the IDs of the destinations from the namenode.
 * Returns the list of target datanodes.
 */
private LocatedBlock nextBlockOutputStream() throws IOException {
  LocatedBlock lb = null;
  DatanodeInfo[] nodes = null;
  StorageType[] storageTypes = null;
  int count = dfsClient.getConf().nBlockWriteRetry;
  boolean success = false;
  ExtendedBlock oldBlock = block;
  do {
    hasError = false;
    lastException.set(null);
    errorIndex = -1;
    success = false;

    DatanodeInfo[] excluded =
        excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
        .keySet()
        .toArray(new DatanodeInfo[0]);
    block = oldBlock;
    lb = locateFollowingBlock(excluded.length > 0 ? excluded : null);
    block = lb.getBlock();
    block.setNumBytes(0);
    bytesSent = 0;
    accessToken = lb.getBlockToken();
    nodes = lb.getLocations();
    storageTypes = lb.getStorageTypes();

    //
    // Connect to first DataNode in the list.
    //
    success = createBlockOutputStream(nodes, storageTypes, 0L, false);

    if (!success) {
      DFSClient.LOG.info("Abandoning " + block);
      dfsClient.namenode.abandonBlock(block, fileId, src,
          dfsClient.clientName);
      block = null;
      DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
      excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
    }
  } while (!success && --count >= 0);

  if (!success) {
    throw new IOException("Unable to create new block.");
  }
  return lb;
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: DFSOutputStream.java

Example 7: testSortLocatedBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlock; // import the package/class the method depends on
/**
 * This test creates a LocatedBlock with 5 locations, sorts the locations
 * based on the network topology, and ensures the locations are still aligned
 * with the storage ids and storage types.
 */
@Test
public void testSortLocatedBlocks() throws IOException {
  // create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());

  // register 5 datanodes, each with different storage ID and type
  DatanodeInfo[] locs = new DatanodeInfo[5];
  String[] storageIDs = new String[5];
  StorageType[] storageTypes = new StorageType[]{
      StorageType.ARCHIVE,
      StorageType.DEFAULT,
      StorageType.DISK,
      StorageType.RAM_DISK,
      StorageType.SSD
  };
  for(int i = 0; i < 5; i++) {
    // register new datanode
    String uuid = "UUID-"+i;
    String ip = "IP-" + i;
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
    Mockito.when(dr.getIpAddr()).thenReturn(ip);
    Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
    Mockito.when(dr.getXferPort()).thenReturn(9000);
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
    dm.registerDatanode(dr);

    // get location and storage information
    locs[i] = dm.getDatanode(uuid);
    storageIDs[i] = "storageID-"+i;
  }

  // set first 2 locations as decommissioned
  locs[0].setDecommissioned();
  locs[1].setDecommissioned();

  // create LocatedBlock with above locations
  ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
  LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
  List<LocatedBlock> blocks = new ArrayList<>();
  blocks.add(block);

  final String targetIp = locs[4].getIpAddr();

  // sort block locations
  dm.sortLocatedBlocks(targetIp, blocks);

  // check that storage IDs/types are aligned with datanode locs
  DatanodeInfo[] sortedLocs = block.getLocations();
  storageIDs = block.getStorageIDs();
  storageTypes = block.getStorageTypes();
  assertThat(sortedLocs.length, is(5));
  assertThat(storageIDs.length, is(5));
  assertThat(storageTypes.length, is(5));
  for(int i = 0; i < sortedLocs.length; i++) {
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
  }

  // Ensure the local node is first.
  assertThat(sortedLocs[0].getIpAddr(), is(targetIp));

  // Ensure the two decommissioned DNs were moved to the end.
  assertThat(sortedLocs[sortedLocs.length-1].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
  assertThat(sortedLocs[sortedLocs.length-2].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
 
Developer: naver, Project: hadoop, Lines: 77, Source: TestDatanodeManager.java


Note: The org.apache.hadoop.hdfs.protocol.LocatedBlock.getStorageTypes method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.