

Java PathFilter.accept Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.PathFilter.accept. If you are wondering what PathFilter.accept does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of org.apache.hadoop.fs.PathFilter, the interface this method belongs to.


The following 14 code examples of the PathFilter.accept method are shown, ordered by popularity by default.
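Before the collected examples, here is a minimal, self-contained sketch of how accept is typically implemented and invoked. The /data directory and the .avro extension are hypothetical, chosen only for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class PathFilterAcceptDemo {
  public static void main(String[] args) throws Exception {
    // A filter that accepts only .avro files (extension chosen for illustration).
    PathFilter avroFilter = new PathFilter() {
      @Override
      public boolean accept(Path path) {
        return path.getName().endsWith(".avro");
      }
    };

    // accept can be called directly on a single path...
    System.out.println(avroFilter.accept(new Path("/data/part-00000.avro"))); // true

    // ...or handed to FileSystem.listStatus, which applies it to each entry.
    FileSystem fs = FileSystem.get(new Configuration());
    for (FileStatus status : fs.listStatus(new Path("/data"), avroFilter)) {
      System.out.println(status.getPath());
    }
  }
}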

Example 1: traverseImpl

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
private static ArrayList<FileStatus> traverseImpl(Storage storage, Path path, PathFilter filter)
    throws IOException {
  if (!storage.exists(path.toString())) {
    return new ArrayList<>();
  }
  ArrayList<FileStatus> result = new ArrayList<>();
  FileStatus[] statuses = storage.listStatus(path.toString());
  for (FileStatus status : statuses) {
    if (status.isDirectory()) {
      result.addAll(traverseImpl(storage, status.getPath(), filter));
    } else {
      if (filter.accept(status.getPath())) {
        result.add(status);
      }
    }
  }
  return result;
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 19, Source: FileUtils.java
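Note that this traversal applies the filter to files only; directories are always descended into, so a filter that rejects a directory's name will not prune the recursion beneath it.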

Example 2: addInputPathRecursively

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
/**
 * Add files in the input path recursively into the results.
 * @param result
 *          The List to store all files.
 * @param fs
 *          The FileSystem.
 * @param path
 *          The input path.
 * @param inputFilter
 *          The input filter that can be used to filter files/dirs. 
 * @throws IOException
 */
protected void addInputPathRecursively(List<FileStatus> result,
    FileSystem fs, Path path, PathFilter inputFilter) 
    throws IOException {
  RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(path);
  while (iter.hasNext()) {
    LocatedFileStatus stat = iter.next();
    if (inputFilter.accept(stat.getPath())) {
      if (stat.isDirectory()) {
        addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
      } else {
        result.add(stat);
      }
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 28, Source: FileInputFormat.java

Example 3: scanDirectory

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
    PathFilter pathFilter) throws IOException {
  path = fc.makeQualified(path);
  List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
  try {
    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
    while (fileStatusIter.hasNext()) {
      FileStatus fileStatus = fileStatusIter.next();
      Path filePath = fileStatus.getPath();
      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
        jhStatusList.add(fileStatus);
      }
    }
  } catch (FileNotFoundException fe) {
    LOG.error("Error while scanning directory " + path, fe);
  }
  return jhStatusList;
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: HistoryFileManager.java

Example 4: publishPlainDataStatistics

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
static DataStatistics publishPlainDataStatistics(Configuration conf, 
                                                 Path inputDir) 
throws IOException {
  FileSystem fs = inputDir.getFileSystem(conf);

  // obtain input data file statuses
  long dataSize = 0;
  long fileCount = 0;
  RemoteIterator<LocatedFileStatus> iter = fs.listFiles(inputDir, true);
  PathFilter filter = new Utils.OutputFileUtils.OutputFilesFilter();
  while (iter.hasNext()) {
    LocatedFileStatus lStatus = iter.next();
    if (filter.accept(lStatus.getPath())) {
      dataSize += lStatus.getLen();
      ++fileCount;
    }
  }

  // publish the plain data statistics
  LOG.info("Total size of input data : " 
           + StringUtils.humanReadableInt(dataSize));
  LOG.info("Total number of input data files : " + fileCount);
  
  return new DataStatistics(dataSize, fileCount, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: GenerateData.java

Example 5: mergeFilters

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
/**
 * Will merge given array of filters into one.
 * If given array of filters is empty, will return {@link #DUMMY_FILTER}.
 *
 * @param filters array of filters
 * @return one filter that combines all given filters
 */
public static PathFilter mergeFilters(final PathFilter... filters) {
  if (filters.length == 0) {
    return DUMMY_FILTER;
  }

  return new PathFilter() {
    @Override
    public boolean accept(Path path) {
      for (PathFilter filter : filters) {
        if (!filter.accept(path)) {
          return false;
        }
      }
      return true;
    }
  };
}
 
Developer ID: axbaretto, Project: drill, Lines: 25, Source: FileSystemUtil.java
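As a hypothetical usage sketch (the paths and the .parquet extension are invented, and FileSystemUtil is assumed to be the Drill class shown above), the merged filter accepts a path only when every component filter accepts it:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class MergeFiltersDemo {
  public static void main(String[] args) {
    // Component filters written as lambdas (PathFilter has a single abstract method).
    PathFilter noHidden = p -> !p.getName().startsWith(".") && !p.getName().startsWith("_");
    PathFilter parquetOnly = p -> p.getName().endsWith(".parquet");

    // mergeFilters is the method from the Drill snippet above (assumed importable).
    PathFilter combined = FileSystemUtil.mergeFilters(noHidden, parquetOnly);
    System.out.println(combined.accept(new Path("/tmp/data.parquet"))); // true
    System.out.println(combined.accept(new Path("/tmp/.tmp.parquet"))); // false: hidden
  }
}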

Example 6: listStatus

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
@Override
public FileStatus[] listStatus(String path, PathFilter filter) throws IOException {
  if (failure == Failure.listStatusFailure) {
    failure = Failure.noFailure;
    throw new IOException("listStatus failed.");
  }
  List<FileStatus> result = new ArrayList<>();
  for (String key: data.keySet()) {
    if (key.startsWith(path) && filter.accept(new Path(key))) {
      FileStatus status = new FileStatus(data.get(key).size(), false, 1, 0, 0, 0, null, null, null, new Path(key));
      result.add(status);
    }
  }
  return result.toArray(new FileStatus[result.size()]);
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 16, Source: MemoryStorage.java

Example 7: accept

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
public boolean accept(Path path) {
  for (PathFilter filter : filters) {
    if (filter.accept(path)) {
      return true;
    }
  }
  return false;
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 9, Source: CombineFileInputFormat.java
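Unlike most composite filters in this article, this accept implementation uses OR semantics: the path is accepted as soon as any one filter accepts it. Compare Example 9, where a path must pass every filter.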

Example 8: listFilteredStatus

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
private static List<FileStatus> listFilteredStatus(FileContext fc, Path root,
    PathFilter filter) throws IOException {
  List<FileStatus> fsList = remoteIterToList(fc.listStatus(root));
  if (filter == null) {
    return fsList;
  } else {
    List<FileStatus> filteredList = new LinkedList<FileStatus>();
    for (FileStatus fs : fsList) {
      if (filter.accept(fs.getPath())) {
        filteredList.add(fs);
      }
    }
    return filteredList;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 16, Source: JobHistoryUtils.java

Example 9: accept

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
public boolean accept(Path path) {
  for (PathFilter filter : filters) {
    if (!filter.accept(path)) {
      return false;
    }
  }
  return true;
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: FileInputFormat.java

Example 10: singleThreadedListStatus

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
private List<FileStatus> singleThreadedListStatus(JobConf job, Path[] dirs,
    PathFilter inputFilter, boolean recursive) throws IOException {
  List<FileStatus> result = new ArrayList<FileStatus>();
  List<IOException> errors = new ArrayList<IOException>();
  for (Path p: dirs) {
    FileSystem fs = p.getFileSystem(job); 
    FileStatus[] matches = fs.globStatus(p, inputFilter);
    if (matches == null) {
      errors.add(new IOException("Input path does not exist: " + p));
    } else if (matches.length == 0) {
      errors.add(new IOException("Input Pattern " + p + " matches 0 files"));
    } else {
      for (FileStatus globStat: matches) {
        if (globStat.isDirectory()) {
          RemoteIterator<LocatedFileStatus> iter =
              fs.listLocatedStatus(globStat.getPath());
          while (iter.hasNext()) {
            LocatedFileStatus stat = iter.next();
            if (inputFilter.accept(stat.getPath())) {
              if (recursive && stat.isDirectory()) {
                addInputPathRecursively(result, fs, stat.getPath(),
                    inputFilter);
              } else {
                result.add(stat);
              }
            }
          }
        } else {
          result.add(globStat);
        }
      }
    }
  }
  if (!errors.isEmpty()) {
    throw new InvalidInputException(errors);
  }
  return result;
}
 
Developer ID: naver, Project: hadoop, Lines: 39, Source: FileInputFormat.java

Example 11: singleThreadedListStatus

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
private List<FileStatus> singleThreadedListStatus(JobContext job, Path[] dirs,
    PathFilter inputFilter, boolean recursive) throws IOException {
  List<FileStatus> result = new ArrayList<FileStatus>();
  List<IOException> errors = new ArrayList<IOException>();
  for (int i=0; i < dirs.length; ++i) {
    Path p = dirs[i];
    FileSystem fs = p.getFileSystem(job.getConfiguration()); 
    FileStatus[] matches = fs.globStatus(p, inputFilter);
    if (matches == null) {
      errors.add(new IOException("Input path does not exist: " + p));
    } else if (matches.length == 0) {
      errors.add(new IOException("Input Pattern " + p + " matches 0 files"));
    } else {
      for (FileStatus globStat: matches) {
        if (globStat.isDirectory()) {
          RemoteIterator<LocatedFileStatus> iter =
              fs.listLocatedStatus(globStat.getPath());
          while (iter.hasNext()) {
            LocatedFileStatus stat = iter.next();
            if (inputFilter.accept(stat.getPath())) {
              if (recursive && stat.isDirectory()) {
                addInputPathRecursively(result, fs, stat.getPath(),
                    inputFilter);
              } else {
                result.add(stat);
              }
            }
          }
        } else {
          result.add(globStat);
        }
      }
    }
  }

  if (!errors.isEmpty()) {
    throw new InvalidInputException(errors);
  }
  return result;
}
 
Developer ID: naver, Project: hadoop, Lines: 41, Source: FileInputFormat.java

Example 12: accept

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
public boolean accept(Path path) {
    for (PathFilter filter : filters) {
        if (!filter.accept(path)) {
            return false;
        }
    }
    return true;
}
 
Developer ID: marklogic, Project: marklogic-contentpump, Lines: 9, Source: FileAndDirectoryInputFormat.java

Example 13: listStatus

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
@Override
public FileStatus[] listStatus(Path path, PathFilter filter) throws IOException {
    List<FileStatus> subPaths = pathsAndContent.get(path.toString());
    List<FileStatus> filteredSubPaths = new ArrayList<>();
    for (FileStatus subPath : subPaths) {
        if (filter.accept(subPath.getPath())) {
            filteredSubPaths.add(fakeFileStatus(subPath.getPath().toString()));
        }
    }
    return filteredSubPaths.toArray(new FileStatus[filteredSubPaths.size()]);
}
 
Developer ID: EXASOL, Project: hadoop-etl-udfs, Lines: 12, Source: HCatMetadataServiceTest.java

Example 14: archiveRegion

import org.apache.hadoop.fs.PathFilter; // import the package/class the method depends on
/**
 * Remove an entire region from the table directory via archiving the region's hfiles.
 * @param fs {@link FileSystem} from which to remove the region
 * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
 *          the archive path)
 * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
 * @param regionDir {@link Path} to where a region is being stored (for building the archive path)
 * @return <tt>true</tt> if the region was successfully deleted. <tt>false</tt> if the filesystem
 *         operations could not complete.
 * @throws IOException if the request cannot be completed
 */
public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("ARCHIVING " + regionDir.toString());
  }

  // otherwise, we archive the files
  // make sure we can archive
  if (tableDir == null || regionDir == null) {
    LOG.error("No archive directory could be found because tabledir (" + tableDir
        + ") or regiondir (" + regionDir + "was null. Deleting files instead.");
    deleteRegionWithoutArchiving(fs, regionDir);
    // we should have archived, but failed to. Doesn't matter if we deleted
    // the archived files correctly or not.
    return false;
  }

  // make sure the regiondir lives under the tabledir
  Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
  Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir,
      FSUtils.getTableName(tableDir),
      regionDir.getName());

  FileStatusConverter getAsFile = new FileStatusConverter(fs);
  // otherwise, we attempt to archive the store files

  // build collection of just the store directories to archive
  Collection<File> toArchive = new ArrayList<File>();
  final PathFilter dirFilter = new FSUtils.DirFilter(fs);
  PathFilter nonHidden = new PathFilter() {
    @Override
    public boolean accept(Path file) {
      return dirFilter.accept(file) && !file.getName().toString().startsWith(".");
    }
  };
  FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
  // if there are no files, we can just delete the directory and return
  if (storeDirs == null) {
    LOG.debug("Region directory (" + regionDir + ") was empty, just deleting and returning!");
    return deleteRegionWithoutArchiving(fs, regionDir);
  }

  // convert the files in the region to a File
  toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
  LOG.debug("Archiving " + toArchive);
  boolean success = false;
  try {
    success = resolveAndArchive(fs, regionArchiveDir, toArchive);
  } catch (IOException e) {
    LOG.error("Failed to archive " + toArchive, e);
    success = false;
  }

  // if that was successful, then we delete the region
  if (success) {
    return deleteRegionWithoutArchiving(fs, regionDir);
  }

  throw new IOException("Received error when attempting to archive files (" + toArchive
      + "), cannot delete region directory. ");
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 73, Source: HFileArchiver.java


Note: The org.apache.hadoop.fs.PathFilter.accept examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects, and copyright in the source code remains with the original authors. Before distributing or using the code, please consult the corresponding project's license; do not reproduce without permission.