

Java FileSystem.getFileBlockLocations Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.getFileBlockLocations. If you are looking for how to call FileSystem.getFileBlockLocations, or what it is typically used for, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following 15 code examples of FileSystem.getFileBlockLocations are shown below, sorted by popularity by default.
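Before the examples, here is a minimal sketch of the call pattern most of the snippets below share: obtain a FileStatus for a path, then ask the FileSystem for the BlockLocations that cover a byte range. The path "/tmp/sample.txt" and the bare Configuration are placeholder assumptions for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetFileBlockLocationsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/sample.txt");   // placeholder path for illustration
    FileSystem fs = file.getFileSystem(conf);

    FileStatus status = fs.getFileStatus(file);
    // Request the block locations covering the whole file (offset 0, length = file size).
    BlockLocation[] locations =
        fs.getFileBlockLocations(status, 0, status.getLen());

    for (BlockLocation loc : locations) {
      System.out.println("offset=" + loc.getOffset()
          + " length=" + loc.getLength()
          + " hosts=" + String.join(",", loc.getHosts()));
    }
  }
}

Passing 0 and status.getLen() asks for every block of the file; a narrower (start, len) window returns only the blocks overlapping that range, which is how the input-format examples below compute per-split locality.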

Example 1: checkFile

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Developer: naver, Project: hadoop, Lines: 21, Source file: TestSmallBlock.java

Example 2: waitForBlocks

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void waitForBlocks(FileSystem fileSys, Path name)
  throws IOException {
  // wait until we have at least one block in the file to read.
  boolean done = false;

  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, blockSize);
    if (locations.length < 1) {
      done = false;
      continue;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source file: TestFileConcurrentReader.java

Example 3: getBlockLocationsOutput

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private static BlockLocation[] getBlockLocationsOutput(int fileSize,
    int blockSize, long start, long len, String blockLocationHost)
    throws Exception {
  Configuration conf = new Configuration();
  conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME, ""
      + blockSize);
  if (blockLocationHost != null) {
    conf.set(NativeAzureFileSystem.AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME,
        blockLocationHost);
  }
  AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
      .createMock(conf);
  FileSystem fs = testAccount.getFileSystem();
  Path testFile = createTestFile(fs, fileSize);
  FileStatus stat = fs.getFileStatus(testFile);
  BlockLocation[] locations = fs.getFileBlockLocations(stat, start, len);
  testAccount.cleanup();
  return locations;
}
 
Developer: naver, Project: hadoop, Lines: 20, Source file: TestNativeAzureFileSystemBlockLocations.java

Example 4: getLocations

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public String[] getLocations() throws IOException {
  HashSet<String> hostSet = new HashSet<String>();
  for (Path file : getPaths()) {
    FileSystem fs = file.getFileSystem(getJob());
    FileStatus status = fs.getFileStatus(file);
    BlockLocation[] blkLocations = fs.getFileBlockLocations(status,
                                        0, status.getLen());
    if (blkLocations != null && blkLocations.length > 0) {
      addToSet(hostSet, blkLocations[0].getHosts());
    }
  }
  return hostSet.toArray(new String[hostSet.size()]);
}
 
Developer: naver, Project: hadoop, Lines: 14, Source file: MultiFileSplit.java

Example 5: getFileBlockLocations

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
protected BlockLocation[] getFileBlockLocations(
  FileSystem fs, FileStatus stat) throws IOException {
  if (stat instanceof LocatedFileStatus) {
    return ((LocatedFileStatus) stat).getBlockLocations();
  }
  return fs.getFileBlockLocations(stat, 0, stat.getLen());
}
 
Developer: naver, Project: hadoop, Lines: 8, Source file: CombineFileInputFormat.java

Example 6: waitReplication

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Wait for the given file to reach the given replication factor.
 * @throws TimeoutException if we fail to sufficiently replicate the file
 */
public static void waitReplication(FileSystem fs, Path fileName, short replFactor)
    throws IOException, InterruptedException, TimeoutException {
  boolean correctReplFactor;
  final int ATTEMPTS = 40;
  int count = 0;

  do {
    correctReplFactor = true;
    BlockLocation locs[] = fs.getFileBlockLocations(
      fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
    count++;
    for (int j = 0; j < locs.length; j++) {
      String[] hostnames = locs[j].getNames();
      if (hostnames.length != replFactor) {
        correctReplFactor = false;
        System.out.println("Block " + j + " of file " + fileName
            + " has replication factor " + hostnames.length
            + " (desired " + replFactor + "); locations "
            + Joiner.on(' ').join(hostnames));
        Thread.sleep(1000);
        break;
      }
    }
    if (correctReplFactor) {
      System.out.println("All blocks of file " + fileName
          + " verified to have replication factor " + replFactor);
    }
  } while (!correctReplFactor && count < ATTEMPTS);

  if (count == ATTEMPTS) {
    throw new TimeoutException("Timed out waiting for " + fileName +
        " to reach " + replFactor + " replicas");
  }
}
 
Developer: naver, Project: hadoop, Lines: 39, Source file: DFSTestUtil.java

Example 7: checkFile

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {;}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replciated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected = 
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    for (int i= 0; i < expected.length; i++) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  // do not check file status since the file is not yet closed.
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
      expected, "Read 1", false);
}
 
Developer: naver, Project: hadoop, Lines: 41, Source file: TestFileAppend.java

Example 8: checkGetBlockLocationsWorks

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
void checkGetBlockLocationsWorks(FileSystem fs, Path fileName) throws IOException {
  FileStatus stat = fs.getFileStatus(fileName);
  try {  
    fs.getFileBlockLocations(stat, 0, 1000);
  } catch (SafeModeException e) {
    assertTrue("Should have not got safemode exception", false);
  } catch (RemoteException re) {
    assertTrue("Should have not got safemode exception", false);   
  }    
}
 
Developer: naver, Project: hadoop, Lines: 11, Source file: TestSafeMode.java

Example 9: computeHDFSBlocksDistribution

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Compute HDFS blocks distribution of a given file, or a portion of the file
 * @param fs file system
 * @param status file status of the file
 * @param start start position of the portion
 * @param length length of the portion
 * @return The HDFS blocks distribution
 */
static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
  final FileSystem fs, FileStatus status, long start, long length)
  throws IOException {
  HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
  BlockLocation [] blockLocations =
    fs.getFileBlockLocations(status, start, length);
  for(BlockLocation bl : blockLocations) {
    String [] hosts = bl.getHosts();
    long len = bl.getLength();
    blocksDistribution.addHostsAndBlockWeight(hosts, len);
  }

  return blocksDistribution;
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source file: FSUtils.java

Example 10: getFileBlockLocations

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
protected BlockLocation[] getFileBlockLocations(
  FileSystem fs, FileStatus stat) throws IOException {
  return fs.getFileBlockLocations(stat, 0, stat.getLen());
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 5, Source file: CombineFileInputFormat.java

Example 11: testLocality

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public void testLocality() throws Exception {
  JobConf job = new JobConf(conf);
  dfs = newDFSCluster(job);
  FileSystem fs = dfs.getFileSystem();
  System.out.println("FileSystem " + fs.getUri());

  Path inputDir = new Path("/foo/");
  String fileName = "part-0000";
  createInputs(fs, inputDir, fileName);

  // split it using a file input format
  TextInputFormat.addInputPath(job, inputDir);
  TextInputFormat inFormat = new TextInputFormat();
  inFormat.configure(job);
  InputSplit[] splits = inFormat.getSplits(job, 1);
  FileStatus fileStatus = fs.getFileStatus(new Path(inputDir, fileName));
  BlockLocation[] locations =
    fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
  System.out.println("Made splits");

  // make sure that each split is a block and the locations match
  for(int i=0; i < splits.length; ++i) {
    FileSplit fileSplit = (FileSplit) splits[i];
    System.out.println("File split: " + fileSplit);
    for (String h: fileSplit.getLocations()) {
      System.out.println("Location: " + h);
    }
    System.out.println("Block: " + locations[i]);
    assertEquals(locations[i].getOffset(), fileSplit.getStart());
    assertEquals(locations[i].getLength(), fileSplit.getLength());
    String[] blockLocs = locations[i].getHosts();
    String[] splitLocs = fileSplit.getLocations();
    assertEquals(2, blockLocs.length);
    assertEquals(2, splitLocs.length);
    assertTrue((blockLocs[0].equals(splitLocs[0]) &&
                blockLocs[1].equals(splitLocs[1])) ||
               (blockLocs[1].equals(splitLocs[0]) &&
                blockLocs[0].equals(splitLocs[1])));
  }

  assertEquals("Expected value of " + FileInputFormat.NUM_INPUT_FILES,
               1, job.getLong(FileInputFormat.NUM_INPUT_FILES, 0));
}
 
Developer: naver, Project: hadoop, Lines: 44, Source file: TestMRCJCFileInputFormat.java

Example 12: OneFileInfo

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
OneFileInfo(FileStatus stat, Configuration conf,
            boolean isSplitable,
            HashMap<String, List<OneBlockInfo>> rackToBlocks,
            HashMap<OneBlockInfo, String[]> blockToNodes,
            HashMap<String, Set<OneBlockInfo>> nodeToBlocks,
            HashMap<String, Set<String>> rackToNodes,
            long maxSize)
            throws IOException {
  this.fileSize = 0;

  // get block locations from file system
  BlockLocation[] locations;
  if (stat instanceof LocatedFileStatus) {
    locations = ((LocatedFileStatus) stat).getBlockLocations();
  } else {
    FileSystem fs = stat.getPath().getFileSystem(conf);
    locations = fs.getFileBlockLocations(stat, 0, stat.getLen());
  }
  // create a list of all block and their locations
  if (locations == null) {
    blocks = new OneBlockInfo[0];
  } else {

    if(locations.length == 0 && !stat.isDirectory()) {
      locations = new BlockLocation[] { new BlockLocation() };
    }

    if (!isSplitable) {
      // if the file is not splitable, just create the one block with
      // full file length
      blocks = new OneBlockInfo[1];
      fileSize = stat.getLen();
      blocks[0] = new OneBlockInfo(stat.getPath(), 0, fileSize,
          locations[0].getHosts(), locations[0].getTopologyPaths());
    } else {
      ArrayList<OneBlockInfo> blocksList = new ArrayList<OneBlockInfo>(
          locations.length);
      for (int i = 0; i < locations.length; i++) {
        fileSize += locations[i].getLength();

        // each split can be a maximum of maxSize
        long left = locations[i].getLength();
        long myOffset = locations[i].getOffset();
        long myLength = 0;
        do {
          if (maxSize == 0) {
            myLength = left;
          } else {
            if (left > maxSize && left < 2 * maxSize) {
              // if remainder is between max and 2*max - then
              // instead of creating splits of size max, left-max we
              // create splits of size left/2 and left/2. This is
              // a heuristic to avoid creating really really small
              // splits.
              myLength = left / 2;
            } else {
              myLength = Math.min(maxSize, left);
            }
          }
          OneBlockInfo oneblock = new OneBlockInfo(stat.getPath(),
              myOffset, myLength, locations[i].getHosts(),
              locations[i].getTopologyPaths());
          left -= myLength;
          myOffset += myLength;

          blocksList.add(oneblock);
        } while (left > 0);
      }
      blocks = blocksList.toArray(new OneBlockInfo[blocksList.size()]);
    }
    
    populateBlockInfo(blocks, rackToBlocks, blockToNodes, 
                      nodeToBlocks, rackToNodes);
  }
}
 
Developer: naver, Project: hadoop, Lines: 76, Source file: CombineFileInputFormat.java

Example 13: testHostsExcludeInUI

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    String names = name + "\n" + "localhost:42\n";
    LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 57, Source file: TestHostsFiles.java

Example 14: testNodeDecomissionRespectsRackPolicy

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 48, Source file: TestBlocksWithNotEnoughRacks.java

Example 15: testNodeDecomissionWithOverreplicationRespectsRackPolicy

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() 
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 5;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());

  // All hosts are on two racks, only one host on /rack2
  String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Lower the replication factor so the blocks are over replicated
    REPLICATION_FACTOR = 2;
    fs.setReplication(filePath, REPLICATION_FACTOR);

    // Decommission one of the hosts with the block that is not on
    // the lone host on rack2 (if we decomission that host it would
    // be impossible to respect the rack policy).
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    for (String top : locs[0].getTopologyPaths()) {
      if (!top.startsWith("/rack2")) {
        String name = top.substring("/rack1".length()+1);
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        break;
      }
    }

    // Check the block still has sufficient # replicas across racks,
    // ie we didn't remove the replica on the host on /rack1.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 58, Source file: TestBlocksWithNotEnoughRacks.java


Note: The org.apache.hadoop.fs.FileSystem.getFileBlockLocations examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. When sharing or using the code, please follow the corresponding project's license; do not reproduce this article without permission.