

Java NameNodeAdapter.getBlockLocations Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getBlockLocations. If you are wondering what NameNodeAdapter.getBlockLocations does, or how and where to use it, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.


The following 4 code examples of NameNodeAdapter.getBlockLocations are shown, sorted by popularity by default.
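
All four examples share the same call shape: pass the NameNode of a running MiniDFSCluster, a file path string, an offset, and a length, and get back a LocatedBlocks describing the file's block locations. For orientation, here is a minimal self-contained sketch of that pattern. NameNodeAdapter lives in the HDFS test sources, so this is assumed to run in a test context; the class name GetBlockLocationsSketch, the path /demo.dat, and the file size are illustrative assumptions, not code taken from any one example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

public class GetBlockLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // spin up a single-datanode mini cluster (hypothetical test setup)
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      DistributedFileSystem dfs = cluster.getFileSystem();
      Path file = new Path("/demo.dat");  // hypothetical test file
      long fileLen = 1024;
      DFSTestUtil.createFile(dfs, file, fileLen, (short) 1, 0L);
      // ask the NameNode directly for the file's block locations:
      // arguments are (namenode, src path, offset, length)
      LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
          cluster.getNameNode(), file.toString(), 0, fileLen);
      System.out.println("located blocks: " + lbs.locatedBlockCount());
    } finally {
      cluster.shutdown();
    }
  }
}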

Example 1: testRenameWithOverwrite

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the class that provides the method
/**
 * Check that the blocks of the dst file are cleaned up after a rename
 * with overwrite, and restart the NN to verify that the rename persisted.
 */
@Test(timeout = 120000)
public void testRenameWithOverwrite() throws Exception {
  final short replFactor = 2;
  final long blockSize = 512;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
      numDataNodes(replFactor).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    
    long fileLen = blockSize*3;
    String src = "/foo/src";
    String dst = "/foo/dst";
    Path srcPath = new Path(src);
    Path dstPath = new Path(dst);
    
    DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
    DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
    
    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), dst, 0, fileLen);
    BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).
        getBlockManager();
    assertNotNull(bm.getStoredBlock(
        lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()));
    dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
    assertNull(bm.getStoredBlock(
        lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()));

    // Restart the NN and verify that the rename persisted
    cluster.restartNameNodes();
    assertFalse(dfs.exists(srcPath));
    assertTrue(dfs.exists(dstPath));
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 47, Source file: TestDFSRename.java

Example 2: testReplicationError

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the class that provides the method
/**
 * Test that when there is a failure replicating a block, the temporary
 * and meta files are cleaned up and subsequent replication succeeds.
 */
@Test
public void testReplicationError() throws Exception {
  // create a file with a replication factor of 1
  final Path fileName = new Path("/test.txt");
  final int fileLen = 1;
  DFSTestUtil.createFile(fs, fileName, 1, (short)1, 1L);
  DFSTestUtil.waitReplication(fs, fileName, (short)1);

  // get the block that belongs to the created file
  LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), fileName.toString(), 0, (long)fileLen);
  assertEquals("Should only find 1 block", 1, blocks.locatedBlockCount());
  LocatedBlock block = blocks.get(0);

  // bring up a second datanode
  cluster.startDataNodes(conf, 1, true, null, null);
  cluster.waitActive();
  final int sndNode = 1;
  DataNode datanode = cluster.getDataNodes().get(sndNode);
  
  // replicate the block to the second datanode
  InetSocketAddress target = datanode.getXferAddress();
  Socket s = new Socket(target.getAddress(), target.getPort());
  // write the header.
  DataOutputStream out = new DataOutputStream(s.getOutputStream());

  DataChecksum checksum = DataChecksum.newDataChecksum(
      DataChecksum.Type.CRC32, 512);
  new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
      BlockTokenSecretManager.DUMMY_TOKEN, "",
      new DatanodeInfo[0], new StorageType[0], null,
      BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
      checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
  out.flush();

  // close the connection before sending the content of the block
  out.close();

  // the temporary block & meta files should be deleted
  String bpid = cluster.getNamesystem().getBlockPoolId();
  File storageDir = cluster.getInstanceStorageDir(sndNode, 0);
  File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  storageDir = cluster.getInstanceStorageDir(sndNode, 1);
  File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
    Thread.sleep(100);
  }

  // then increase the file's replication factor
  fs.setReplication(fileName, (short)2);
  // replication should now succeed on two datanodes
  DFSTestUtil.waitReplication(fs, fileName, (short)2);

  // clean up the file
  fs.delete(fileName, false);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 61, Source file: TestDiskError.java
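
A brief reading of this test: the Sender.writeBlock call transmits only the transfer-operation header and then closes the socket before any block data is sent, so the receiving DataNode aborts the transfer; the polling loop then confirms that the aborted replica's temporary block and meta files disappear from both rbw directories before re-replication is attempted.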

Example 3: testDecommissionWithOpenfile

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the class that provides the method
@Test(timeout=120000)
public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithOpenfile");

  // at most 4 nodes will be decommissioned
  startCluster(1, 7, conf);

  FileSystem fileSys = cluster.getFileSystem(0);
  FSNamesystem ns = cluster.getNamesystem(0);

  String openFile = "/testDecommissionWithOpenfile.dat";

  writeFile(fileSys, new Path(openFile), (short)3);
  // re-open the file for append so that it stays open for write
  FSDataOutputStream fdos = fileSys.append(new Path(openFile));

  LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(0), openFile, 0, fileSize);
  DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
  DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
  
  ArrayList<String> nodes = new ArrayList<String>();
  ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();

  DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  // collect the nodes that host the first block but not the last block
  for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
    DatanodeInfo found = datanodeInfo;
    for (DatanodeInfo dif : dnInfos4LastBlock) {
      if (datanodeInfo.equals(dif)) {
        found = null;
      }
    }
    if (found != null) {
      nodes.add(found.getXferAddr());
      dnInfos.add(dm.getDatanode(found));
    }
  }
  // decommission one of the 3 nodes that host the last block
  nodes.add(dnInfos4LastBlock[0].getXferAddr());
  dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));

  writeConfigFile(excludeFile, nodes);
  refreshNodes(ns, conf);
  for (DatanodeInfo dn : dnInfos) {
    waitNodeState(dn, AdminStates.DECOMMISSIONED);
  }

  fdos.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 50, Source file: TestDecommission.java

Example 4: testReplicationError

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the class that provides the method
/**
 * Test that when there is a failure replicating a block, the temporary
 * and meta files are cleaned up and subsequent replication succeeds.
 */
@Test
public void testReplicationError() throws Exception {
  // create a file with a replication factor of 1
  final Path fileName = new Path("/test.txt");
  final int fileLen = 1;
  DFSTestUtil.createFile(fs, fileName, 1, (short)1, 1L);
  DFSTestUtil.waitReplication(fs, fileName, (short)1);

  // get the block that belongs to the created file
  LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), fileName.toString(), 0, (long)fileLen);
  assertEquals("Should only find 1 block", 1, blocks.locatedBlockCount());
  LocatedBlock block = blocks.get(0);

  // bring up a second datanode
  cluster.startDataNodes(conf, 1, true, null, null);
  cluster.waitActive();
  final int sndNode = 1;
  DataNode datanode = cluster.getDataNodes().get(sndNode);
  FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(datanode);

  // replicate the block to the second datanode
  InetSocketAddress target = datanode.getXferAddress();
  Socket s = new Socket(target.getAddress(), target.getPort());
  // write the header.
  DataOutputStream out = new DataOutputStream(s.getOutputStream());

  DataChecksum checksum = DataChecksum.newDataChecksum(
      DataChecksum.Type.CRC32, 512);
  new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
      BlockTokenSecretManager.DUMMY_TOKEN, "",
      new DatanodeInfo[0], new StorageType[0], null,
      BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
      checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
  out.flush();

  // close the connection before sending the content of the block
  out.close();

  // the temporary block & meta files should be deleted
  String bpid = cluster.getNamesystem().getBlockPoolId();
  while (utils.getStoredReplicas(bpid).hasNext()) {
    Thread.sleep(100);
  }

  // then increase the file's replication factor
  fs.setReplication(fileName, (short)2);
  // replication should now succeed on two datanodes
  DFSTestUtil.waitReplication(fs, fileName, (short)2);

  // clean up the file
  fs.delete(fileName, false);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 58, Source file: TestDiskError.java
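
Example 4 is the same TestDiskError case as Example 2, carried forward to a newer codebase: rather than polling the rbw directories of each instance storage directory by hand, it obtains the DataNode's FsDatasetTestUtils handle and waits until getStoredReplicas(bpid) reports no remaining replicas for the block pool, which keeps the test independent of the on-disk storage layout.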


Note: The org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getBlockLocations examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects, and copyright remains with the original authors; consult each project's License before distributing or using the code. Do not republish without permission.