

Java RemoteIterator.hasNext Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.RemoteIterator.hasNext. If you are wondering what RemoteIterator.hasNext does, how to call it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.RemoteIterator.


The sections below present 15 code examples of the RemoteIterator.hasNext method, sorted by popularity by default.
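Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: obtain a RemoteIterator from a FileSystem listing call and drain it with hasNext()/next(). The path /tmp/data and the class name ListFilesExample are illustrative placeholders, not taken from any of the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListFilesExample {
  public static void main(String[] args) throws IOException {
    // Placeholder directory; point this at a path that exists in your cluster.
    Path dir = new Path("/tmp/data");
    FileSystem fs = FileSystem.get(new Configuration());

    // listFiles returns a RemoteIterator; unlike java.util.Iterator,
    // hasNext() and next() may issue RPCs and therefore declare IOException.
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(dir, true /* recursive */);
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}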

Example 1: assertFileCount

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
/**
 * Assert that the number of log files in the target directory is as expected.
 * @param fs the target FileSystem
 * @param dir the target directory path
 * @param expected the expected number of files
 * @throws IOException thrown if listing files fails
 */
public void assertFileCount(FileSystem fs, Path dir, int expected)
    throws IOException {
  RemoteIterator<LocatedFileStatus> i = fs.listFiles(dir, true);
  int count = 0;

  while (i.hasNext()) {
    i.next();
    count++;
  }

  assertTrue("The sink created additional unexpected log files. " + count
      + "files were created", expected >= count);
  assertTrue("The sink created too few log files. " + count + "files were "
      + "created", expected <= count);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 23, Source: RollingFileSystemSinkTestBase.java

Example 2: scanDirectory

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
    PathFilter pathFilter) throws IOException {
  path = fc.makeQualified(path);
  List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
  try {
    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
    while (fileStatusIter.hasNext()) {
      FileStatus fileStatus = fileStatusIter.next();
      Path filePath = fileStatus.getPath();
      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
        jhStatusList.add(fileStatus);
      }
    }
  } catch (FileNotFoundException fe) {
    LOG.error("Error while scanning directory " + path, fe);
  }
  return jhStatusList;
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: HistoryFileManager.java

Example 3: checkNamenodeBeforeReturn

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(
            new CacheDirectiveInfo.Builder().
                setPool(directive.getPool()).
                setPath(directive.getPath()).
                build());
    if (iter.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
  }
  return false;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestRetryCacheWithHA.java

Example 4: checkEquals

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
private static void checkEquals(RemoteIterator<LocatedFileStatus> i1,
    RemoteIterator<LocatedFileStatus> i2) throws IOException {
  while (i1.hasNext()) {
    assertTrue(i2.hasNext());
    
    // Compare all the fields but the path name, which is relative
    // to the original path from listFiles.
    LocatedFileStatus l1 = i1.next();
    LocatedFileStatus l2 = i2.next();
    assertEquals(l1.getAccessTime(), l2.getAccessTime());
    assertEquals(l1.getBlockSize(), l2.getBlockSize());
    assertEquals(l1.getGroup(), l2.getGroup());
    assertEquals(l1.getLen(), l2.getLen());
    assertEquals(l1.getModificationTime(), l2.getModificationTime());
    assertEquals(l1.getOwner(), l2.getOwner());
    assertEquals(l1.getPermission(), l2.getPermission());
    assertEquals(l1.getReplication(), l2.getReplication());
  }
  assertFalse(i2.hasNext());
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestINodeFile.java

Example 5: getORCRecords

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
public List<OrcStruct> getORCRecords(String storeBaseDir, String tableName) throws IOException {
  List<OrcStruct> orcrecords = new ArrayList<>();
  try {
    FileSystem fs = FileSystem.get(conf);
    Path storeBasePath = new Path(fs.getHomeDirectory(), storeBaseDir);
    Path tablePath = new Path(storeBasePath, tableName);
    if (fs.exists(tablePath)) {
      RemoteIterator<LocatedFileStatus> locatedFileStatusRemoteIterator =
          fs.listFiles(tablePath, false);
      while (locatedFileStatusRemoteIterator.hasNext()) {
        LocatedFileStatus next = locatedFileStatusRemoteIterator.next();
        final org.apache.hadoop.hive.ql.io.orc.Reader fis =
            OrcFile.createReader(next.getPath(), OrcFile.readerOptions(conf));
        RecordReader rows = fis.rows();
        while (rows.hasNext()) {
          orcrecords.add((OrcStruct) rows.next(null));
        }
        System.out.println("File name is " + next.getPath());
      }
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
  return orcrecords;
}
 
Developer: ampool, Project: monarch, Lines: 26, Source: HDFSQuasiService.java

Example 6: addInputPathRecursively

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
/**
 * Add files in the input path recursively into the results.
 * @param result
 *          The List to store all files.
 * @param fs
 *          The FileSystem.
 * @param path
 *          The input path.
 * @param inputFilter
 *          The input filter that can be used to filter files/dirs. 
 * @throws IOException
 */
protected void addInputPathRecursively(List<FileStatus> result,
    FileSystem fs, Path path, PathFilter inputFilter) 
    throws IOException {
  RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(path);
  while (iter.hasNext()) {
    LocatedFileStatus stat = iter.next();
    if (inputFilter.accept(stat.getPath())) {
      if (stat.isDirectory()) {
        addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
      } else {
        result.add(stat);
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: FileInputFormat.java

Example 7: deleteLocalDir

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
private void deleteLocalDir(FileContext lfs, DeletionService del,
    String localDir) throws IOException {
  RemoteIterator<FileStatus> fileStatus = lfs.listStatus(new Path(localDir));
  if (fileStatus != null) {
    while (fileStatus.hasNext()) {
      FileStatus status = fileStatus.next();
      try {
        if (status.getPath().getName().matches(".*" +
            ContainerLocalizer.USERCACHE + "_DEL_.*")) {
          LOG.info("usercache path : " + status.getPath().toString());
          cleanUpFilesPerUserDir(lfs, del, status.getPath());
        } else if (status.getPath().getName()
            .matches(".*" + NM_PRIVATE_DIR + "_DEL_.*")
            ||
            status.getPath().getName()
                .matches(".*" + ContainerLocalizer.FILECACHE + "_DEL_.*")) {
          del.delete(null, status.getPath(), new Path[] {});
        }
      } catch (IOException ex) {
        // Do nothing, just give the warning
        LOG.warn("Failed to delete this local Directory: " +
            status.getPath().getName());
      }
    }
  }
}
 
Developer: yncxcw, Project: big-c, Lines: 27, Source: ResourceLocalizationService.java

Example 8: assertZonePresent

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
/**
 * Checks that an encryption zone with the specified keyName and path (if not
 * null) is present.
 *
 * @throws IOException if a matching zone could not be found
 */
public void assertZonePresent(String keyName, String path) throws IOException {
  final RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
  boolean match = false;
  while (it.hasNext()) {
    EncryptionZone zone = it.next();
    boolean matchKey = (keyName == null);
    boolean matchPath = (path == null);
    if (keyName != null && zone.getKeyName().equals(keyName)) {
      matchKey = true;
    }
    if (path != null && zone.getPath().equals(path)) {
      matchPath = true;
    }
    if (matchKey && matchPath) {
      match = true;
      break;
    }
  }
  assertTrue("Did not find expected encryption zone with keyName " + keyName +
          " path " + path, match
  );
}
 
Developer: yncxcw, Project: big-c, Lines: 29, Source: TestEncryptionZones.java

Example 9: checkNamenodeBeforeReturn

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(
            new CacheDirectiveInfo.Builder().
              setPool(directive.getPool()).
              setPath(directive.getPath()).
              build());
    if (!iter.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
  }
  return false;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 17, Source: TestRetryCacheWithHA.java

Example 10: literalAllSegments

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
public static void literalAllSegments(FileSystem fileSystem, Path dir, Consumer<LocatedFileStatus> consumer) throws IOException {
    RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(dir, true);
    while (files.hasNext()) {
        LocatedFileStatus fileStatus = files.next();
        if (!fileStatus.isFile()) {
            continue;
        }
        if (fileStatus.getLen() == 0) {
            continue;
        }

        Path path = fileStatus.getPath();
        if (checkSegmentByPath(path)) {
            consumer.accept(fileStatus);
        }
    }
}
 
Developer: shunfei, Project: indexr, Lines: 18, Source: SegmentHelper.java

Example 11: enumerateDir

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
void enumerateDir() throws Exception {
	System.out.println("enumarate dir, path " + path);
	Configuration conf = new Configuration();
	FileSystem fs = FileSystem.get(conf); 

	int repfactor = 4;
	for (int k = 0; k < repfactor; k++) {
		long start = System.currentTimeMillis();
		for (int i = 0; i < size; i++) {
			// single operation == loop
			RemoteIterator<LocatedFileStatus> iter = fs.listFiles(path, false);
			while (iter.hasNext()) {
				iter.next();
			}
		}
		long end = System.currentTimeMillis();
		double executionTime = ((double) (end - start));
		double latency = executionTime * 1000.0 / ((double) size);
		System.out.println("execution time [ms] " + executionTime);
		System.out.println("latency [us] " + latency);
	}
	fs.close();
}
 
Developer: zrlio, Project: crail, Lines: 24, Source: HdfsIOBenchmark.java

Example 12: run

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    final TableListing listing = new TableListing.Builder()
      .addField("").addField("", true)
      .wrapWidth(AdminHelper.MAX_LINE_WIDTH).hideHeaders().build();
    final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
    while (it.hasNext()) {
      EncryptionZone ez = it.next();
      listing.addRow(ez.getPath(), ez.getKeyName());
    }
    System.out.println(listing.toString());
  } catch (IOException e) {
    System.err.println(prettifyException(e));
    return 2;
  }

  return 0;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: CryptoAdmin.java

Example 13: cleanUpFilesPerUserDir

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
private void cleanUpFilesPerUserDir(FileContext lfs, DeletionService del,
    Path userDirPath) throws IOException {
  RemoteIterator<FileStatus> userDirStatus = lfs.listStatus(userDirPath);
  FileDeletionTask dependentDeletionTask =
      del.createFileDeletionTask(null, userDirPath, new Path[] {});
  if (userDirStatus != null && userDirStatus.hasNext()) {
    List<FileDeletionTask> deletionTasks = new ArrayList<FileDeletionTask>();
    while (userDirStatus.hasNext()) {
      FileStatus status = userDirStatus.next();
      String owner = status.getOwner();
      FileDeletionTask deletionTask =
          del.createFileDeletionTask(owner, null,
            new Path[] { status.getPath() });
      deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
      deletionTasks.add(deletionTask);
    }
    for (FileDeletionTask task : deletionTasks) {
      del.scheduleFileDeletionTask(task);
    }
  } else {
    del.scheduleFileDeletionTask(dependentDeletionTask);
  }
}
 
Developer: yncxcw, Project: big-c, Lines: 24, Source: ResourceLocalizationService.java

Example 14: listLocatedStatus

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f,
    final PathFilter filter) throws FileNotFoundException, IOException {
  final InodeTree.ResolveResult<FileSystem> res = fsState
      .resolve(getUriPath(f), true);
  final RemoteIterator<LocatedFileStatus> statusIter = res.targetFileSystem
      .listLocatedStatus(res.remainingPath);

  if (res.isInternalDir()) {
    return statusIter;
  }

  return new RemoteIterator<LocatedFileStatus>() {
    @Override
    public boolean hasNext() throws IOException {
      return statusIter.hasNext();
    }

    @Override
    public LocatedFileStatus next() throws IOException {
      final LocatedFileStatus status = statusIter.next();
      return (LocatedFileStatus)fixFileStatus(status,
          getChrootedPath(res, status, f));
    }
  };
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 27, Source: ViewFileSystem.java

Example 15: listStatusInternal

import org.apache.hadoop.fs.RemoteIterator; // import the package/class this method depends on
private FileStatus[] listStatusInternal(boolean located, Path dataPath) throws IOException {
  FileStatus[] dirPaths = new FileStatus[0];
  if (located) {
    RemoteIterator<LocatedFileStatus> statIter =
        fsView.listLocatedStatus(dataPath);
    ArrayList<LocatedFileStatus> tmp = new ArrayList<LocatedFileStatus>(10);
    while (statIter.hasNext()) {
      tmp.add(statIter.next());
    }
    dirPaths = tmp.toArray(dirPaths);
  } else {
    dirPaths = fsView.listStatus(dataPath);
  }
  return dirPaths;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 16, Source: ViewFileSystemBaseTest.java


Note: The org.apache.hadoop.fs.RemoteIterator.hasNext examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. For redistribution and use, please follow the license of the corresponding project; do not repost without permission.