当前位置: 首页>>代码示例>>Java>>正文


Java NamenodeProtocols.getBlockLocations方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.getBlockLocations方法的典型用法代码示例。如果您正苦于以下问题:Java NamenodeProtocols.getBlockLocations方法的具体用法?Java NamenodeProtocols.getBlockLocations怎么用?Java NamenodeProtocols.getBlockLocations使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols的用法示例。


在下文中一共展示了NamenodeProtocols.getBlockLocations方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testGetBlockLocations

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 *
 * Writes a file one block at a time without closing it, and after each
 * write asks the NameNode for the block locations, checking that
 * (a) the reported block count matches what has been written so far, and
 * (b) the last block of the still-open file is reported as
 *     under-construction rather than complete.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  // Replication factor 3; the file stays open across the loop below.
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    // i is the number of blocks the NameNode should know about so far.
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    // The tail block of an open file must still be under construction.
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:32,代码来源:TestBlockUnderConstruction.java

示例2: testGetBlockLocations

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 *
 * Grows a file block by block without closing it; after each write the
 * NameNode must report exactly the blocks written so far, and the tail
 * block of the open file must not yet be complete.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
  final Path path = new Path(BASE_DIR, "file2.dat");
  final String src = path.toString();
  final FSDataOutputStream stream = TestFileCreation.createFile(hdfs, path, 3);

  // Begin with half a block so the file is clearly mid-write.
  int written = BLOCK_SIZE >>> 1;
  writeFile(path, stream, written);

  int expectedBlocks = 1;
  while (expectedBlocks < NUM_BLOCKS) {
    // The NameNode's view must match what has been written so far.
    final List<LocatedBlock> located =
        namenode.getBlockLocations(src, 0, written).getLocatedBlocks();
    assertEquals(expectedBlocks, located.size());
    final Block tail =
        located.get(located.size() - 1).getBlock().getLocalBlock();
    // The last block of a still-open file must not be complete.
    assertFalse(blockManager.getStoredBlock(tail).isComplete());

    expectedBlocks++;
    if (expectedBlocks < NUM_BLOCKS) {
      // Append one full block and repeat the consistency check.
      writeFile(path, stream, BLOCK_SIZE);
      written += BLOCK_SIZE;
    }
  }
  // Finally close the file.
  stream.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:33,代码来源:TestBlockUnderConstruction.java

示例3: testGetBlockLocations

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 *
 * Writes a file one block at a time, never closing it until the end;
 * after every write the NameNode must report the correct block count
 * and an under-construction tail block.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path path = new Path(BASE_DIR, "file2.dat");
  final String src = path.toString();
  final FSDataOutputStream stream = TestFileCreation.createFile(hdfs, path, 3);

  // Start with half a block so the file is clearly mid-write.
  int written = BLOCK_SIZE >>> 1;
  writeFile(path, stream, written);

  int expectedBlocks = 1;
  while (expectedBlocks < NUM_BLOCKS) {
    // The NameNode's view must match what has been written so far.
    final List<LocatedBlock> located =
        namenode.getBlockLocations(src, 0, written).getLocatedBlocks();
    assertEquals(expectedBlocks, located.size());
    final Block tail =
        located.get(located.size() - 1).getBlock().getLocalBlock();
    // The tail block of an open file must still be under construction.
    assertTrue(tail instanceof BlockInfoUnderConstruction);

    expectedBlocks++;
    if (expectedBlocks < NUM_BLOCKS) {
      // Append one full block and repeat the consistency check.
      writeFile(path, stream, BLOCK_SIZE);
      written += BLOCK_SIZE;
    }
  }
  // Finally close the file.
  stream.close();
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:32,代码来源:TestBlockUnderConstruction.java

示例4: testGetBlockLocations

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 *
 * Writes a file one block at a time without closing it, and after each
 * write asks the NameNode for the block locations, checking that the
 * reported block count matches what has been written and that the last
 * block is still under construction.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  // Replication factor 3; the file stays open across the loop below.
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    // i is the number of blocks the NameNode should know about so far.
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    // The tail block of an open file must still be under construction.
    assertTrue(b instanceof BlockInfoUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:32,代码来源:TestBlockUnderConstruction.java

示例5: chooseDatanode

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Choose the datanode that should serve a WebHDFS request for {@code path}.
 *
 * For CREATE, picks a write target near the requesting client; for
 * OPEN/GETFILECHECKSUM/APPEND, picks a node already holding a replica of
 * the relevant block; otherwise (or when no better choice exists) falls
 * back to a random node in the cluster topology.
 *
 * @param path             file path the operation targets
 * @param op               the WebHDFS operation being dispatched
 * @param openOffset       read offset for OPEN; ignored for other ops
 * @param blocksize        block size used when placing a new block
 * @param excludeDatanodes comma-separated hosts (optionally host:port)
 *                         that must not be chosen; may be null
 * @throws FileNotFoundException if the path does not exist (read ops)
 * @throws IOException if the OPEN offset is outside [0, file length)
 */
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  
  // Resolve the exclude list into datanode descriptors. Entries with a
  // ":" are matched by transfer address (host:port), others by host only.
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      // Reject offsets beyond the file; a zero-length file accepts offset 0.
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      // OPEN reads at openOffset; APPEND/GETFILECHECKSUM look at the
      // last byte so they land on the final block.
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  // No preferred node found: pick any node from the topology at random.
  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:62,代码来源:NamenodeWebHdfsMethods.java

示例6: testRetryAddBlockWhileInChooseTarget

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Retry addBlock() while another thread is in chooseTarget().
 * See HDFS-4452.
 *
 * Simulates the race by splitting the first addBlock into its two halves
 * (target selection, then block commit) and running a complete second
 * addBlock in between; the retried first call must reuse the block the
 * second call allocated rather than creating a duplicate.
 */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
  final String src = "/testRetryAddBlockWhileInChooseTarget";

  final FSNamesystem ns = cluster.getNamesystem();
  final NamenodeProtocols nn = cluster.getNameNodeRpc();

  // create file
  nn.create(src, FsPermission.getFileDefault(),
      "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short)3, 1024, null);

  // start first addBlock()
  // Only the chooseTarget half runs here; the block is not committed yet.
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  DatanodeStorageInfo targets[] = ns.getNewBlockTargets(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName",
      null, null, null, onRetryBlock);
  assertNotNull("Targets must be generated", targets);

  // run second addBlock()
  // This full call completes while the first is "paused" mid-operation.
  LOG.info("Starting second addBlock for " + src);
  nn.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Penultimate block must be complete",
      checkFileProgress(src, false));
  LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb2 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);

  // continue first addBlock()
  // The retried commit must detect the already-allocated block and
  // return it instead of allocating a new one.
  LocatedBlock newBlock = ns.storeAllocatedBlock(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName", null, targets);
  assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());

  // check locations
  lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb1 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:49,代码来源:TestAddBlockRetry.java

示例7: chooseDatanode

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Choose the datanode that should serve a WebHDFS request for {@code path}.
 *
 * For CREATE, picks a write target near the requesting client; for
 * OPEN/GETFILECHECKSUM/APPEND, picks a node already holding a replica of
 * the relevant block; otherwise (or when no better choice exists) falls
 * back to a random node in the cluster topology.
 *
 * @param path             file path the operation targets
 * @param op               the WebHDFS operation being dispatched
 * @param openOffset       read offset for OPEN; ignored for other ops
 * @param blocksize        block size used when placing a new block
 * @param excludeDatanodes comma-separated hosts (optionally host:port)
 *                         that must not be chosen; may be null
 * @throws IOException if the namesystem is not yet initialized, the path
 *         does not exist (read ops), or the OPEN offset is out of range
 */
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  FSNamesystem fsn = namenode.getNamesystem();
  if (fsn == null) {
    // Fixed typo in the error message: "intialized" -> "initialized".
    throw new IOException("Namesystem has not been initialized yet.");
  }
  final BlockManager bm = fsn.getBlockManager();
  
  // Resolve the exclude list into datanode descriptors. Entries with a
  // ":" are matched by transfer address (host:port), others by host only.
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      // Reject offsets beyond the file; a zero-length file accepts offset 0.
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      // OPEN reads at openOffset; APPEND/GETFILECHECKSUM look at the
      // last byte so they land on the final block.
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  // No preferred node found: pick any node from the topology at random.
  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:66,代码来源:NamenodeWebHdfsMethods.java

示例8: testRetryAddBlockWhileInChooseTarget

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Retry addBlock() while another thread is in chooseTarget().
 * See HDFS-4452.
 *
 * Simulates the race by splitting the first addBlock into its two halves
 * (validate/choose targets, then store the allocated block) and running a
 * complete second addBlock in between; the retried first call must reuse
 * the block the second call allocated rather than creating a duplicate.
 */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
  final String src = "/testRetryAddBlockWhileInChooseTarget";

  final FSNamesystem ns = cluster.getNamesystem();
  final NamenodeProtocols nn = cluster.getNameNodeRpc();

  // create file
  nn.create(src, FsPermission.getFileDefault(),
      "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short)3, 1024, null);

  // start first addBlock()
  // Only validation + target selection run here; the block is not
  // committed yet, mimicking a thread paused inside chooseTarget().
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  ns.readLock();
  FSDirWriteFileOp.ValidateAddBlockResult r;
  FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
  try {
    r = FSDirWriteFileOp.validateAddBlock(ns, pc, src,
                                          HdfsConstants.GRANDFATHER_INODE_ID,
                                          "clientName", null, onRetryBlock);
  } finally {
    // Fixed: removed stray double semicolon after readUnlock().
    ns.readUnlock();
  }
  DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
      ns.getBlockManager(), src, null, null, r);
  assertNotNull("Targets must be generated", targets);

  // run second addBlock()
  // This full call completes while the first is "paused" mid-operation.
  LOG.info("Starting second addBlock for " + src);
  nn.addBlock(src, "clientName", null, null,
              HdfsConstants.GRANDFATHER_INODE_ID, null);
  assertTrue("Penultimate block must be complete",
             checkFileProgress(src, false));
  LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb2 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);

  // continue first addBlock()
  // The retried commit must detect the already-allocated block and
  // return it instead of allocating a new one.
  ns.writeLock();
  LocatedBlock newBlock;
  try {
    newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
        HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
  } finally {
    ns.writeUnlock();
  }
  assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());

  // check locations
  lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb1 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:64,代码来源:TestAddBlockRetry.java

示例9: chooseDatanode

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Choose the datanode that should serve a WebHDFS request for {@code path}.
 *
 * For CREATE, places the first replica near the requesting client via the
 * block placement policy; for OPEN/GETFILECHECKSUM/APPEND, picks a node
 * already holding a replica of the relevant block; otherwise (or when no
 * better choice exists) falls back to a random node in the topology.
 *
 * @param path       file path the operation targets
 * @param op         the WebHDFS operation being dispatched
 * @param openOffset read offset for OPEN; ignored for other ops
 * @param blocksize  block size used when placing a new block
 * @param conf       configuration passed through to JspHelper.bestNode
 * @throws FileNotFoundException if the path does not exist (read ops)
 * @throws IOException if the OPEN offset is outside [0, file length)
 */
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, Configuration conf) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy()
          .chooseTarget(path, 1, clientNode,
              new ArrayList<DatanodeDescriptor>(), false, null, blocksize);
      if (datanodes.length > 0) {
        return datanodes[0];
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = namenode.getRpcServer();
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      // Reject offsets beyond the file; a zero-length file accepts offset 0.
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      // OPEN reads at openOffset; APPEND/GETFILECHECKSUM look at the
      // last byte so they land on the final block.
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
      }
    }
  } 

  // No preferred node found: pick any node from the topology at random.
  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:48,代码来源:NamenodeWebHdfsMethods.java

示例10: chooseDatanode

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Choose the datanode that should serve a WebHDFS request for {@code path}.
 *
 * CREATE requests are placed near the requesting client via the block
 * placement policy; OPEN/GETFILECHECKSUM/APPEND requests go to a node
 * that already holds a replica of the relevant block. When neither rule
 * produces a node, a random node from the topology is returned.
 */
static DatanodeInfo chooseDatanode(final NameNode namenode, final String path,
    final HttpOpParam.Op op, final long openOffset, final long blocksize,
    Configuration conf) throws IOException {
  final BlockManager blockManager = namenode.getNamesystem().getBlockManager();

  if (op == PutOpParam.Op.CREATE) {
    // Writes: place the first replica close to the requesting client.
    final DatanodeDescriptor client =
        blockManager.getDatanodeManager().getDatanodeByHost(getRemoteAddress());
    if (client != null) {
      final DatanodeDescriptor[] chosen = blockManager.getBlockPlacementPolicy()
          .chooseTarget(path, 1, client,
              new ArrayList<DatanodeDescriptor>(), false, null, blocksize);
      if (chosen.length > 0) {
        return chosen[0];
      }
    }
  } else if (op == GetOpParam.Op.OPEN ||
      op == GetOpParam.Op.GETFILECHECKSUM || op == PostOpParam.Op.APPEND) {
    // Reads/appends: prefer a node that already stores a replica.
    final NamenodeProtocols rpc = namenode.getRpcServer();
    final HdfsFileStatus status = rpc.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    // An OPEN offset past the end is an error; offset 0 on an empty file is ok.
    if (op == GetOpParam.Op.OPEN
        && (openOffset < 0L || (openOffset >= len && len > 0))) {
      throw new IOException(
          "Offset=" + openOffset + " out of the range [0, " + len + "); " +
              op + ", path=" + path);
    }

    if (len > 0) {
      // OPEN looks up its own offset; APPEND/GETFILECHECKSUM look up the
      // last byte so they resolve to the final block.
      final long lookupOffset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
      final LocatedBlocks located = rpc.getBlockLocations(path, lookupOffset, 1);
      if (located.locatedBlockCount() > 0) {
        return JspHelper
            .bestNode(located.get(0).getLocations(), false, conf);
      }
    }
  }

  // No preferred node found: fall back to a random node in the topology.
  return (DatanodeDescriptor) blockManager.getDatanodeManager()
      .getNetworkTopology().chooseRandom(NodeBase.ROOT);
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:49,代码来源:NamenodeWebHdfsMethods.java


注:本文中的org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.getBlockLocations方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。