

Java RemoteIterator.next Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.RemoteIterator.next. If you are wondering exactly what RemoteIterator.next does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.RemoteIterator.


A total of 15 code examples of the RemoteIterator.next method are shown below, sorted by popularity by default.
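Before the examples, here is a minimal, self-contained sketch of the general pattern (it is not taken from any of the projects below; the class name ListFilesSketch and the path /tmp/input are placeholders). The point to note is that, unlike java.util.Iterator, both hasNext() and next() on RemoteIterator are declared to throw IOException, so callers handle or propagate I/O failures around the loop.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListFilesSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path dir = new Path("/tmp/input");   // placeholder path, adjust as needed
    FileSystem fs = dir.getFileSystem(conf);

    // listFiles returns a RemoteIterator; both hasNext() and next()
    // may throw IOException (e.g. when a remote call fails).
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(dir, true);
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " : " + status.getLen() + " bytes");
    }
  }
}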

Example 1: publishPlainDataStatistics

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
static DataStatistics publishPlainDataStatistics(Configuration conf, 
                                                 Path inputDir) 
throws IOException {
  FileSystem fs = inputDir.getFileSystem(conf);

  // obtain input data file statuses
  long dataSize = 0;
  long fileCount = 0;
  RemoteIterator<LocatedFileStatus> iter = fs.listFiles(inputDir, true);
  PathFilter filter = new Utils.OutputFileUtils.OutputFilesFilter();
  while (iter.hasNext()) {
    LocatedFileStatus lStatus = iter.next();
    if (filter.accept(lStatus.getPath())) {
      dataSize += lStatus.getLen();
      ++fileCount;
    }
  }

  // publish the plain data statistics
  LOG.info("Total size of input data : " 
           + StringUtils.humanReadableInt(dataSize));
  LOG.info("Total number of input data files : " + fileCount);
  
  return new DataStatistics(dataSize, fileCount, false);
}
 
Developer: naver, Project: hadoop, Lines of code: 26, Source file: GenerateData.java

Example 2: listCachePools

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@SuppressWarnings("unchecked")
private void listCachePools(
    HashSet<String> poolNames, int active) throws Exception {
  HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
  RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
  int poolCount = poolNames.size();
  for (int i=0; i<poolCount; i++) {
    CachePoolEntry pool = pools.next();
    String pollName = pool.getInfo().getPoolName();
    assertTrue("The pool name should be expected", tmpNames.remove(pollName));
    if (i % 2 == 0) {
      int standby = active;
      active = (standby == 0) ? 1 : 0;
      cluster.transitionToStandby(standby);
      cluster.transitionToActive(active);
      cluster.waitActive(active);
    }
  }
  assertTrue("All pools must be found", tmpNames.isEmpty());
}
 
Developer: naver, Project: hadoop, Lines of code: 21, Source file: TestRetryCacheWithHA.java

Example 3: testCopyRecursive

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@Test
public void testCopyRecursive() throws Throwable {
  int expected = createTestFiles(sourceDir, 64);

  expectSuccess(
      "-s", sourceDir.toURI().toString(),
      "-d", destDir.toURI().toString(),
      "-t", "4",
      "-l", "3");

  LocalFileSystem local = FileSystem.getLocal(new Configuration());
  Set<String> entries = new TreeSet<>();
  RemoteIterator<LocatedFileStatus> iterator
      = local.listFiles(new Path(destDir.toURI()), true);
  int count = 0;
  while (iterator.hasNext()) {
    LocatedFileStatus next = iterator.next();
    entries.add(next.getPath().toUri().toString());
    LOG.info("Entry {} size = {}", next.getPath(), next.getLen());
    count++;
  }
  assertEquals("Mismatch in files found", expected, count);

}
 
Developer: steveloughran, Project: cloudup, Lines of code: 25, Source file: TestLocalCloudup.java

Example 4: run

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    final TableListing listing = new TableListing.Builder()
      .addField("").addField("", true)
      .wrapWidth(AdminHelper.MAX_LINE_WIDTH).hideHeaders().build();
    final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
    while (it.hasNext()) {
      EncryptionZone ez = it.next();
      listing.addRow(ez.getPath(), ez.getKeyName());
    }
    System.out.println(listing.toString());
  } catch (IOException e) {
    System.err.println(prettifyException(e));
    return 2;
  }

  return 0;
}
 
Developer: naver, Project: hadoop, Lines of code: 26, Source file: CryptoAdmin.java

Example 5: assertListFilesFinds

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
/**
 * To get this project to compile under Hadoop 1, this code needs to be
 * commented out
 *
 *
 * @param fs filesystem
 * @param dir dir
 * @param subdir subdir
 * @param recursive recurse?
 * @throws IOException IO problems
 */
public static void assertListFilesFinds(FileSystem fs,
                                        Path dir,
                                        Path subdir,
                                        boolean recursive) throws IOException {
  RemoteIterator<LocatedFileStatus> iterator =
    fs.listFiles(dir, recursive);
  boolean found = false;
  int entries = 0;
  StringBuilder builder = new StringBuilder();
  while (iterator.hasNext()) {
    LocatedFileStatus next = iterator.next();
    entries++;
    builder.append(next.toString()).append('\n');
    if (next.getPath().equals(subdir)) {
      found = true;
    }
  }
  assertTrue("Path " + subdir
             + " not found in directory " + dir + " : "
             + " entries=" + entries
             + " content"
             + builder.toString(),
             found);
}
 
Developer: naver, Project: hadoop, Lines of code: 36, Source file: TestV2LsOperations.java

Example 6: listCacheDirectives

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@SuppressWarnings("unchecked")
private void listCacheDirectives(
    HashSet<String> poolNames, int active) throws Exception {
  HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
  RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null);
  int poolCount = poolNames.size();
  for (int i=0; i<poolCount; i++) {
    CacheDirectiveEntry directive = directives.next();
    String pollName = directive.getInfo().getPool();
    assertTrue("The pool name should be expected", tmpNames.remove(pollName));
    if (i % 2 == 0) {
      int standby = active;
      active = (standby == 0) ? 1 : 0;
      cluster.transitionToStandby(standby);
      cluster.transitionToActive(active);
      cluster.waitActive(active);
    }
  }
  assertTrue("All pools must be found", tmpNames.isEmpty());
}
 
Developer: naver, Project: hadoop, Lines of code: 21, Source file: TestRetryCacheWithHA.java

Example 7: scanDirectory

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
    PathFilter pathFilter) throws IOException {
  path = fc.makeQualified(path);
  List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
  try {
    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
    while (fileStatusIter.hasNext()) {
      FileStatus fileStatus = fileStatusIter.next();
      Path filePath = fileStatus.getPath();
      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
        jhStatusList.add(fileStatus);
      }
    }
  } catch (FileNotFoundException fe) {
    LOG.error("Error while scanning directory " + path, fe);
  }
  return jhStatusList;
}
 
Developer: naver, Project: hadoop, Lines of code: 20, Source file: HistoryFileManager.java

Example 8: testFile

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
/** Test when input path is a file */
@Test
public void testFile() throws IOException {
  fc.mkdir(TEST_DIR, FsPermission.getDefault(), true);
  writeFile(fc, FILE1, FILE_LEN);

  RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(
      FILE1, true);
  LocatedFileStatus stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  
  itor = fc.util().listFiles(FILE1, false);
  stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
}
 
Developer: naver, Project: hadoop, Lines of code: 24, Source file: TestListFilesInFileContext.java

Example 9: readKeysToSearch

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
static SortedSet<byte []> readKeysToSearch(final Configuration conf)
throws IOException, InterruptedException {
  Path keysInputDir = new Path(conf.get(SEARCHER_INPUTDIR_KEY));
  FileSystem fs = FileSystem.get(conf);
  SortedSet<byte []> result = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
  if (!fs.exists(keysInputDir)) {
    throw new FileNotFoundException(keysInputDir.toString());
  }
  if (!fs.isDirectory(keysInputDir)) {
    throw new UnsupportedOperationException("TODO");
  } else {
    RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(keysInputDir, false);
    while(iterator.hasNext()) {
      LocatedFileStatus keyFileStatus = iterator.next();
      // Skip "_SUCCESS" file.
      if (keyFileStatus.getPath().getName().startsWith("_")) continue;
      result.addAll(readFileToSearch(conf, fs, keyFileStatus));
    }
  }
  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 22, Source file: IntegrationTestBigLinkedList.java

Example 10: addInputPathRecursively

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
/**
 * Add files in the input path recursively into the results.
 * @param result
 *          The List to store all files.
 * @param fs
 *          The FileSystem.
 * @param path
 *          The input path.
 * @param inputFilter
 *          The input filter that can be used to filter files/dirs. 
 * @throws IOException
 */
protected void addInputPathRecursively(List<FileStatus> result,
    FileSystem fs, Path path, PathFilter inputFilter) 
    throws IOException {
  RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(path);
  while (iter.hasNext()) {
    LocatedFileStatus stat = iter.next();
    if (inputFilter.accept(stat.getPath())) {
      if (stat.isDirectory()) {
        addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
      } else {
        result.add(stat);
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 28, Source file: FileInputFormat.java

Example 11: call

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@Override
public Result call() throws Exception {
  Result result = new Result();
  result.fs = fs;

  if (fileStatus.isDirectory()) {
    RemoteIterator<LocatedFileStatus> iter = fs
        .listLocatedStatus(fileStatus.getPath());
    while (iter.hasNext()) {
      LocatedFileStatus stat = iter.next();
      if (inputFilter.accept(stat.getPath())) {
        if (recursive && stat.isDirectory()) {
          result.dirsNeedingRecursiveCalls.add(stat);
        } else {
          result.locatedFileStatuses.add(stat);
        }
      }
    }
  } else {
    result.locatedFileStatuses.add(fileStatus);
  }
  return result;
}
 
Developer: naver, Project: hadoop, Lines of code: 24, Source file: LocatedFileStatusFetcher.java

Example 12: deleteLocalDir

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
private void deleteLocalDir(FileContext lfs, DeletionService del,
    String localDir) throws IOException {
  RemoteIterator<FileStatus> fileStatus = lfs.listStatus(new Path(localDir));
  if (fileStatus != null) {
    while (fileStatus.hasNext()) {
      FileStatus status = fileStatus.next();
      try {
        if (status.getPath().getName().matches(".*" +
            ContainerLocalizer.USERCACHE + "_DEL_.*")) {
          LOG.info("usercache path : " + status.getPath().toString());
          cleanUpFilesPerUserDir(lfs, del, status.getPath());
        } else if (status.getPath().getName()
            .matches(".*" + NM_PRIVATE_DIR + "_DEL_.*")
            ||
            status.getPath().getName()
                .matches(".*" + ContainerLocalizer.FILECACHE + "_DEL_.*")) {
          del.delete(null, status.getPath(), new Path[] {});
        }
      } catch (IOException ex) {
        // Do nothing, just give the warning
        LOG.warn("Failed to delete this local Directory: " +
            status.getPath().getName());
      }
    }
  }
}
 
Developer: yncxcw, Project: big-c, Lines of code: 27, Source file: ResourceLocalizationService.java

Example 13: testListStatusOnFile

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
/** Test the FileStatus obtained calling listStatus on a file */
@Test
public void testListStatusOnFile() throws IOException {
  FileStatus[] stats = fs.listStatus(file1);
  assertEquals(1, stats.length);
  FileStatus status = stats[0];
  assertFalse(file1 + " should be a file", status.isDirectory());
  assertEquals(blockSize, status.getBlockSize());
  assertEquals(1, status.getReplication());
  assertEquals(fileSize, status.getLen());
  assertEquals(file1.makeQualified(fs.getUri(), 
      fs.getWorkingDirectory()).toString(), 
      status.getPath().toString());
  
  RemoteIterator<FileStatus> itor = fc.listStatus(file1);
  status = itor.next();
  assertEquals(stats[0], status);
  assertFalse(file1 + " should be a file", status.isDirectory());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 20, Source file: TestFileStatus.java

Example 14: assertZonePresent

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
/**
 * Checks that an encryption zone with the specified keyName and path (if not
 * null) is present.
 *
 * @throws IOException if a matching zone could not be found
 */
public void assertZonePresent(String keyName, String path) throws IOException {
  final RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
  boolean match = false;
  while (it.hasNext()) {
    EncryptionZone zone = it.next();
    boolean matchKey = (keyName == null);
    boolean matchPath = (path == null);
    if (keyName != null && zone.getKeyName().equals(keyName)) {
      matchKey = true;
    }
    if (path != null && zone.getPath().equals(path)) {
      matchPath = true;
    }
    if (matchKey && matchPath) {
      match = true;
      break;
    }
  }
  assertTrue("Did not find expected encryption zone with keyName " + keyName +
          " path " + path, match
  );
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 29, Source file: TestEncryptionZones.java

Example 15: checkEquals

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
private static void checkEquals(RemoteIterator<LocatedFileStatus> i1,
    RemoteIterator<LocatedFileStatus> i2) throws IOException {
  while (i1.hasNext()) {
    assertTrue(i2.hasNext());
    
    // Compare all the fields but the path name, which is relative
    // to the original path from listFiles.
    LocatedFileStatus l1 = i1.next();
    LocatedFileStatus l2 = i2.next();
    assertEquals(l1.getAccessTime(), l2.getAccessTime());
    assertEquals(l1.getBlockSize(), l2.getBlockSize());
    assertEquals(l1.getGroup(), l2.getGroup());
    assertEquals(l1.getLen(), l2.getLen());
    assertEquals(l1.getModificationTime(), l2.getModificationTime());
    assertEquals(l1.getOwner(), l2.getOwner());
    assertEquals(l1.getPermission(), l2.getPermission());
    assertEquals(l1.getReplication(), l2.getReplication());
  }
  assertFalse(i2.hasNext());
}
 
Developer: yncxcw, Project: big-c, Lines of code: 21, Source file: TestINodeFile.java


Note: The org.apache.hadoop.fs.RemoteIterator.next examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please refer to the license of the corresponding project before distributing or using the code, and do not reproduce this article without permission.