当前位置: 首页>>代码示例>>Java>>正文


Java NamenodeProtocols.addBlock方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.addBlock方法的典型用法代码示例。如果您正苦于以下问题:Java NamenodeProtocols.addBlock方法的具体用法?Java NamenodeProtocols.addBlock怎么用?Java NamenodeProtocols.addBlock使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols的用法示例。


在下文中一共展示了NamenodeProtocols.addBlock方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testOpenFilesWithRename

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Exercises snapshot + rename + delete on a directory that contains a file
 * with a zero-length block, then saves the namespace and restarts the
 * NameNode to verify the image loads cleanly.
 */
@Test
public void testOpenFilesWithRename() throws Exception {
  Path snapshotRoot = new Path("/test");
  doWriteAndAbort(fs, snapshotRoot);

  // Allocate one empty block on a freshly created file so the snapshot
  // captures a zero-sized block.
  Path emptyBlockFile = new Path("/test/test/test4");
  fs.create(emptyBlockFile);
  NamenodeProtocols rpc = cluster.getNameNodeRpc();
  String client = fs.getClient().getClientName();
  rpc.addBlock(emptyBlockFile.toString(), client, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(snapshotRoot, "s2");

  // Rename then remove the directory, persist the namespace, and restart
  // the NameNode so the saved image is re-read.
  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nn = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nn, false);
  NameNodeAdapter.saveNamespace(nn);
  NameNodeAdapter.leaveSafeMode(nn);
  cluster.restartNameNode(true);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:TestOpenFilesWithSnapshot.java

示例2: testAddBlockRetryShouldReturnBlockWithLocations

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * A retried addBlock() after a NameNode restart must return the same block
 * as the first call, and both results must carry non-empty locations.
 */
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
    throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols rpc = cluster.getNameNodeRpc();
  // Create the file that the blocks will be allocated to.
  rpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);

  LOG.info("Starting first addBlock for " + src);
  LocatedBlock firstBlock = rpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present",
      firstBlock.getLocations().length > 0);

  // Restart and re-fetch the RPC proxy, then retry the same addBlock().
  cluster.restartNameNode();
  rpc = cluster.getNameNodeRpc();
  LocatedBlock retriedBlock = rpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", firstBlock.getBlock(),
      retriedBlock.getBlock());
  assertTrue("Wrong locations with retry",
      retriedBlock.getLocations().length > 0);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:TestAddBlockRetry.java

示例3: testOpenFilesWithRename

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Exercises snapshot + rename + delete on a directory that contains a file
 * with a zero-length block, then saves the namespace and restarts the
 * NameNode to verify the image loads cleanly.
 */
@Test
public void testOpenFilesWithRename() throws Exception {
  Path root = new Path("/test");
  doWriteAndAbort(fs, root);

  // Create a file and allocate a single empty block on it so the snapshot
  // below covers a zero-sized block.
  Path zeroBlockFile = new Path("/test/test/test4");
  fs.create(zeroBlockFile);
  NamenodeProtocols rpc = cluster.getNameNodeRpc();
  String client = fs.getClient().getClientName();
  rpc.addBlock(zeroBlockFile.toString(), client, null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(root, "s2");

  // Rename and delete the directory, then force a namespace save and a
  // NameNode restart so the persisted image is reloaded.
  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nn = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nn, false);
  NameNodeAdapter.saveNamespace(nn);
  NameNodeAdapter.leaveSafeMode(nn);
  cluster.restartNameNode(true);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:24,代码来源:TestOpenFilesWithSnapshot.java

示例4: testAddBlockRetryShouldReturnBlockWithLocations

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * A retried addBlock() after a NameNode restart must return the same block
 * as the original call, and both results must carry non-empty locations.
 */
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
    throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols rpc = cluster.getNameNodeRpc();
  // Create the target file before allocating blocks to it.
  rpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);

  LOG.info("Starting first addBlock for " + src);
  LocatedBlock initial = rpc.addBlock(src, "clientName", null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present",
      initial.getLocations().length > 0);

  // Restart the NameNode, grab a fresh proxy, and replay the addBlock().
  cluster.restartNameNode();
  rpc = cluster.getNameNodeRpc();
  LocatedBlock retried = rpc.addBlock(src, "clientName", null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", initial.getBlock(),
      retried.getBlock());
  assertTrue("Wrong locations with retry", retried.getLocations().length > 0);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:24,代码来源:TestAddBlockRetry.java

示例5: testRetryAddBlockWhileInChooseTarget

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Simulates a retried addBlock() arriving while another thread is still
 * inside chooseTarget() for the first call. Both calls must converge on a
 * single block. See HDFS-4452.
 */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
  final String src = "/testRetryAddBlockWhileInChooseTarget";

  final FSNamesystem ns = cluster.getNamesystem();
  final NamenodeProtocols nn = cluster.getNameNodeRpc();

  // Create the file the competing addBlock() calls will target.
  nn.create(src, FsPermission.getFileDefault(),
      "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short)3, 1024, null);

  // First addBlock(): stop after target selection, before the block is
  // actually committed to the namespace.
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  DatanodeStorageInfo chosenTargets[] = ns.getNewBlockTargets(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName",
      null, null, null, onRetryBlock);
  assertNotNull("Targets must be generated", chosenTargets);

  // Second addBlock() runs to completion while the first is "paused".
  LOG.info("Starting second addBlock for " + src);
  nn.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Penultimate block must be complete",
      checkFileProgress(src, false));
  LocatedBlocks located = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, located.getLocatedBlocks().size());
  LocatedBlock lb2 = located.get(0);
  assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);

  // Resume the first addBlock(); it must reuse the block the second call
  // already allocated rather than creating a new one.
  LocatedBlock newBlock = ns.storeAllocatedBlock(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName", null, chosenTargets);
  assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());

  // Re-read the locations and confirm both views agree on one block.
  located = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, located.getLocatedBlocks().size());
  LocatedBlock lb1 = located.get(0);
  assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:49,代码来源:TestAddBlockRetry.java

示例6: testRetryAddBlockWhileInChooseTarget

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; //导入方法依赖的package包/类
/**
 * Simulates a retried addBlock() arriving while another thread is still
 * inside chooseTarget() for the first call. Both calls must converge on a
 * single block. See HDFS-4452.
 */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
  final String src = "/testRetryAddBlockWhileInChooseTarget";

  final FSNamesystem ns = cluster.getNamesystem();
  final NamenodeProtocols nn = cluster.getNameNodeRpc();

  // Create the file the competing addBlock() calls will target.
  nn.create(src, FsPermission.getFileDefault(),
      "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short)3, 1024, null);

  // First addBlock(): perform validation + target selection under the read
  // lock, then stop before the block is committed to the namespace.
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  ns.readLock();
  FSDirWriteFileOp.ValidateAddBlockResult r;
  FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
  try {
    r = FSDirWriteFileOp.validateAddBlock(ns, pc, src,
                                          HdfsConstants.GRANDFATHER_INODE_ID,
                                          "clientName", null, onRetryBlock);
  } finally {
    // Fixed: original had a stray empty statement ("readUnlock();;").
    ns.readUnlock();
  }
  DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
      ns.getBlockManager(), src, null, null, r);
  assertNotNull("Targets must be generated", targets);

  // Second addBlock() runs to completion while the first is "paused".
  LOG.info("Starting second addBlock for " + src);
  nn.addBlock(src, "clientName", null, null,
              HdfsConstants.GRANDFATHER_INODE_ID, null);
  assertTrue("Penultimate block must be complete",
             checkFileProgress(src, false));
  LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb2 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);

  // Resume the first addBlock() under the write lock; it must reuse the
  // block the second call already allocated rather than creating a new one.
  ns.writeLock();
  LocatedBlock newBlock;
  try {
    newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
        HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
  } finally {
    ns.writeUnlock();
  }
  assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());

  // Re-read the locations and confirm both views agree on one block.
  lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb1 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:64,代码来源:TestAddBlockRetry.java


注:本文中的org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.addBlock方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。