本文整理汇总了Java中org.apache.commons.io.filefilter.FileFilterUtils.suffixFileFilter方法的典型用法代码示例。如果您正苦于以下问题:Java FileFilterUtils.suffixFileFilter方法的具体用法?Java FileFilterUtils.suffixFileFilter怎么用?Java FileFilterUtils.suffixFileFilter使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 org.apache.commons.io.filefilter.FileFilterUtils 的用法示例。
在下文中一共展示了FileFilterUtils.suffixFileFilter方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: findGitRepos
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
 * Recursively scans {@code dirPath} for directories whose path ends with a
 * {@code .git} segment and returns the canonical paths of the enclosing
 * repositories (the trailing {@code .git} segment stripped).
 *
 * @param dirPath root directory to start the scan from
 * @return canonical paths of the discovered Git repository roots
 */
public static List<String> findGitRepos(String dirPath) {
    File dir = new File(dirPath);
    // suffixFileFilter already returns an IOFileFilter -- the original's cast
    // was redundant.
    IOFileFilter gitDirFilter = FileFilterUtils.suffixFileFilter(".git");
    // "not a file" => only directories pass the composite filter.
    IOFileFilter notFile = FileFilterUtils.notFileFilter(TrueFileFilter.INSTANCE);
    IOFileFilter compositeFilter = FileFilterUtils.and(notFile, gitDirFilter);
    List<String> results = new ArrayList<String>();
    // Use the platform separator so the suffix check also works on Windows;
    // the original hard-coded "/.git".
    String gitSuffix = File.separator + ".git";
    // listFilesAndDirs returns a Collection<File>; the original's cast to
    // List<File> relied on an implementation detail.
    for (File f : FileUtils.listFilesAndDirs(dir, compositeFilter, DirectoryFileFilter.INSTANCE)) {
        try {
            String path = f.getCanonicalPath();
            if (!path.endsWith(gitSuffix)) {
                continue;
            }
            // Strip only the trailing ".git" segment; the original's
            // replace() would also rewrite any earlier occurrence in the path.
            String gitStripped = path.substring(0, path.length() - gitSuffix.length());
            System.out.println(gitStripped);
            results.add(gitStripped);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    return results;
}
示例2: main
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
* @param args
*/
/**
 * Entry point: splits the files found in a source directory into named,
 * weighted segments.
 *
 * @param args fromDirectory toDirectory fileSuffix segmentName_1 weight_1 ...
 */
public static void main(final String[] args) {
    if (args.length < 7) {
        System.err
                .println("Usage fromDirectory toDirectory fileSuffix <<segmentName_i> <weight_i> ...>");
        System.exit(-1);
    }
    final File sourceDirectory = new File(args[0]);
    final File targetDirectory = new File(args[1]);
    final IOFileFilter suffixFilter = FileFilterUtils.suffixFileFilter(args[2]);
    // The remaining arguments come in (segmentName, weight) pairs from index 3.
    final Map<String, Double> segmentWeights = Maps.newHashMap();
    for (int i = 3; i < args.length; i += 2) {
        segmentWeights.put(args[i], Double.valueOf(args[i + 1]));
    }
    LOGGER.info("Splitting files in segments " + segmentWeights);
    splitFiles(sourceDirectory, targetDirectory, segmentWeights, suffixFilter,
            UNIFORM_FILE_WEIGHT);
}
示例3: run
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
 * Partitions the corpus by file size into {@code partitionsNum} parts and
 * writes dev/test/train file lists (paths relative to the corpus directory).
 */
private void run() throws Exception {
    // Accept everything when no suffix is configured, else filter by suffix.
    final IOFileFilter corpusFileFilter = (corpusFileSuffix == null)
            ? FileFilterUtils.trueFileFilter()
            : FileFilterUtils.suffixFileFilter(corpusFileSuffix);
    final IOFileFilter corpusSubDirFilter =
            includeSubDirectores ? TrueFileFilter.INSTANCE : null;
    final List<Set<File>> partitions = Lists.newArrayList(CorpusUtils.partitionCorpusByFileSize(
            corpusDir, corpusFileFilter, corpusSubDirFilter, partitionsNum));
    if (partitions.size() != partitionsNum) {
        throw new IllegalStateException();
    }
    // Take dev first from the tail because the last partition is slightly
    // smaller; test comes from the (new) last element after removal.
    final Set<File> devFiles = getAndRemove(partitions, partitions.size() - 1);
    final Set<File> testFiles = getAndRemove(partitions, partitions.size() - 1);
    final Set<File> trainFiles = Sets.newLinkedHashSet();
    for (final Set<File> partition : partitions) {
        trainFiles.addAll(partition);
    }
    // Persist the three file lists, paths made relative to corpusDir.
    FileUtils.writeLines(new File(outputDir, CorpusUtils.getDevPartitionFilename(0)),
            "utf-8", CorpusUtils.toRelativePaths(corpusDir, devFiles));
    FileUtils.writeLines(new File(outputDir, CorpusUtils.getTestPartitionFilename(0)),
            "utf-8", CorpusUtils.toRelativePaths(corpusDir, testFiles));
    FileUtils.writeLines(new File(outputDir, CorpusUtils.getTrainPartitionFilename(0)),
            "utf-8", CorpusUtils.toRelativePaths(corpusDir, trainFiles));
}
示例4: run
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
 * Builds {@code foldNum} cross-validation splits of the corpus and writes one
 * pair of file lists per split.
 */
private void run() throws Exception {
    // Accept everything when no suffix is configured, else filter by suffix.
    final IOFileFilter corpusFileFilter = (corpusFileSuffix == null)
            ? FileFilterUtils.trueFileFilter()
            : FileFilterUtils.suffixFileFilter(corpusFileSuffix);
    final IOFileFilter corpusSubDirFilter =
            includeSubDirectores ? TrueFileFilter.INSTANCE : null;
    final List<CorpusSplit> corpusSplits = CorpusUtils.createCrossValidationSplits(
            corpusDir, corpusFileFilter, corpusSubDirFilter, foldNum);
    for (int fold = 0; fold < corpusSplits.size(); fold++) {
        writeFileLists(outputDir, fold, corpusSplits.get(fold));
    }
}
示例5: initialize
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
 * Validates the configured directory and collects every file with the
 * configured extension (recursively) for subsequent reading.
 */
@Override
public void initialize(UimaContext ctx) throws ResourceInitializationException {
    super.initialize(ctx);
    if (!directory.isDirectory()) {
        throw new IllegalStateException(String.format(
                "%s is not existing file directory", directory));
    }
    // Recurse into all sub-directories; keep only files ending in fileExtension.
    files = Lists.newArrayList(FileUtils.listFiles(
            directory,
            FileFilterUtils.suffixFileFilter(fileExtension),
            FileFilterUtils.trueFileFilter()));
    // Nothing has been read yet.
    lastReadFileIdx = -1;
}
示例6: getDeliveredReports
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
 * Loads every delivered-report descriptor in the report generation directory
 * whose file name contains the given user's name.
 *
 * @param user the user whose delivered reports are requested
 * @return the deserialized reports (empty array when none match)
 * @throws DeliveryException declared by the interface; not thrown directly here
 */
public DeliveredReport[] getDeliveredReports(ReportUser user) throws DeliveryException
{
    // NOTE(review): suffixFileFilter("xml") matches any name ending in the
    // literal "xml" (not only ".xml" extensions) -- confirm this is intended.
    IOFileFilter extensionFilter = FileFilterUtils.suffixFileFilter("xml");
    File directory = new File(directoryProvider.getReportGenerationDirectory());
    ArrayList<DeliveredReport> deliveredReports = new ArrayList<DeliveredReport>();
    // Typed iterator instead of the original raw Iterator + cast.
    Iterator<File> iterator = FileUtils.iterateFiles(directory, extensionFilter, null);
    while (iterator.hasNext())
    {
        File file = iterator.next();
        if (FilenameUtils.wildcardMatch(file.getName(), "*" + user.getName() + "*"))
        {
            XStream xStream = new XStream();
            xStream.alias("reportGenerationInfo", DeliveredReport.class);
            // try-with-resources closes the stream even when fromXML throws;
            // the original leaked the FileInputStream on parse failure.
            try (FileInputStream inputStream = new FileInputStream(file))
            {
                DeliveredReport report = (DeliveredReport) xStream.fromXML(inputStream);
                deliveredReports.add(report);
            }
            catch(IOException io)
            {
                log.warn(io.toString());
            }
        }
    }
    DeliveredReport[] reports = new DeliveredReport[deliveredReports.size()];
    deliveredReports.toArray(reports);
    return reports;
}
示例7: findTranscripts
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
 * Collects transcript files (suffix {@code TRANSCRIPT_EXTENSION}) located
 * directly inside {@code dir}; sub-directories are not searched.
 *
 * NOTE(review): nameFileFilter matches an exact file name, not a wildcard
 * pattern -- confirm that is the intended semantics of {@code filePattern}.
 *
 * @param dir directory to search (non-recursive)
 * @param filePattern optional exact file name to additionally require
 * @return the matching transcript files
 */
protected Collection<File> findTranscripts(File dir, String filePattern) {
    IOFileFilter transcriptFilter = FileFilterUtils.suffixFileFilter(TRANSCRIPT_EXTENSION);
    if (StringUtils.isNotBlank(filePattern)) {
        transcriptFilter = FileFilterUtils.and(
                transcriptFilter, FileFilterUtils.nameFileFilter(filePattern));
    }
    // null sub-directory filter => no recursion.
    return FileUtils.listFiles(dir, transcriptFilter, null);
}
示例8: importGameStats
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
* Copies H2 game stats database file BUT ONLY if the stats folder
* has been not yet been created (ie. post-install, not if you re-run
* the import process via the "Reset & restart" option).
*/
/**
 * Copies H2 game stats database file BUT ONLY if the stats folder
 * has been not yet been created (ie. post-install, not if you re-run
 * the import process via the "Reset &amp; restart" option).
 */
private void importGameStats() throws IOException {
    setProgressNote(MText.get(_S14));
    final String directoryName = "stats";
    final Path sourcePath = importDataPath.resolve(directoryName);
    final Path targetPath = MagicFileSystem.getDataPath().resolve(directoryName);
    // Copy only *.db files, and only into a missing or empty target folder.
    if (sourcePath.toFile().exists() && MagicFileSystem.isMissingOrEmpty(targetPath)) {
        FileUtils.copyDirectory(sourcePath.toFile(), targetPath.toFile(),
                FileFilterUtils.suffixFileFilter(".db"));
    }
    setProgressNote(OK_STRING);
}
示例9: importCustomDecks
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
* Merges top level "decks" folder only.
* Does not import sub-folders (prebuilt, firemind, etc).
* If file already exists then imported version takes precedence.
*/
/**
 * Merges top level "decks" folder only.
 * Does not import sub-folders (prebuilt, firemind, etc).
 * If file already exists then imported version takes precedence.
 */
private void importCustomDecks() throws IOException {
    setProgressNote(MText.get(_S7));
    final String directoryName = "decks";
    final Path sourcePath = importDataPath.resolve(directoryName);
    if (sourcePath.toFile().exists()) {
        // Copy only *.dec deck files into the data directory.
        final Path targetPath = MagicFileSystem.getDataPath().resolve(directoryName);
        FileUtils.copyDirectory(sourcePath.toFile(), targetPath.toFile(),
                FileFilterUtils.suffixFileFilter(".dec"));
    }
    setProgressNote(OK_STRING);
}
示例10: handleSQLStyleComments
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
 * Walks {@code baseDir} and applies SQL-style ("--") license comments to all
 * regular files whose names end in "sql", skipping SVN metadata; the walker's
 * result summary is printed to stdout.
 *
 * @param baseDir root directory to walk
 * @throws Exception propagated from the directory walker
 */
public static void handleSQLStyleComments( String baseDir ) throws Exception {
    // Build the filter in one expression: suffix match, SVN-aware, files only.
    final IOFileFilter sqlFilesOnly = FileFilterUtils.makeFileOnly(
            FileFilterUtils.makeSVNAware(
                    FileFilterUtils.suffixFileFilter("sql")));
    final LicensableFileDirectoryWalker walker =
            new LicensableFileDirectoryWalker(sqlFilesOnly, "--", "-- ", LINE_SEPARATOR);
    final Collection<String> results = walker.run( baseDir );
    System.out.println( results );
}
示例11: listClasses
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
* List of all .class files from <b>classesDirectory</b>.
* @return A Collection of .class files
*/
/**
 * List of all .class files from <b>classesDirectory</b>.
 * @return A Collection of .class files
 */
private Collection<File> listClasses() {
    // Recurse through all sub-directories, keeping only compiled classes.
    return FileUtils.listFiles(
            this.classesDirectory,
            FileFilterUtils.suffixFileFilter(".class"),
            FileFilterUtils.directoryFileFilter());
}
示例12: main
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
 * Trains the example POS-tagging model: reads .tree files from the treebank
 * directory, runs an annotator pipeline that writes classifier training data,
 * then trains and packages the model into a .jar file.
 *
 * @param args unused
 * @throws Exception propagated from the UIMA pipeline or the trainer
 */
public static void main(String... args) throws Exception {
String outputDirectory = ExamplePosAnnotator.DEFAULT_OUTPUT_DIRECTORY;
// select all the .tree files in the treebank directory
File treebankDirectory = new File("data/pos/treebank");
IOFileFilter treeFileFilter = FileFilterUtils.suffixFileFilter(".tree");
// null sub-directory filter: no recursion into sub-directories
Collection<File> files = FileUtils.listFiles(treebankDirectory, treeFileFilter, null);
// A collection reader that creates one CAS per file, containing the file's URI
CollectionReader reader = UriCollectionReader.getCollectionReaderFromFiles(files);
// The pipeline of annotators
AggregateBuilder builder = new AggregateBuilder();
// An annotator that creates an empty treebank view in the CAS
builder.add(AnalysisEngineFactory.createEngineDescription(
ViewCreatorAnnotator.class,
ViewCreatorAnnotator.PARAM_VIEW_NAME,
PennTreebankReader.TREEBANK_VIEW));
// An annotator that reads the treebank-formatted text into the treebank view
builder.add(
UriToDocumentTextAnnotator.getDescription(),
CAS.NAME_DEFAULT_SOFA,
PennTreebankReader.TREEBANK_VIEW);
// An annotator that uses the treebank text to add tokens and POS tags to the CAS
builder.add(TreebankGoldAnnotator.getDescriptionPOSTagsOnly());
// The POS annotator, configured to write training data
builder.add(ExamplePosAnnotator.getWriterDescription(outputDirectory));
// Run the pipeline of annotators on each of the CASes produced by the reader
SimplePipeline.runPipeline(reader, builder.createAggregateDescription());
System.out.println("training data written to " + ExamplePosAnnotator.DEFAULT_OUTPUT_DIRECTORY);
System.out.println("training model...");
// Train a classifier on the training data, and package it into a .jar file
Train.main(outputDirectory);
System.out.println("model written to "
+ JarClassifierBuilder.getModelJarFile(ExamplePosAnnotator.DEFAULT_OUTPUT_DIRECTORY).getPath());
}
示例13: AgilentCefFileFilter
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/** Accepts files whose name ends with {@code EXT}, compared case-insensitively. */
public AgilentCefFileFilter() {
super(FileFilterUtils.suffixFileFilter(EXT, IOCase.INSENSITIVE));
}
示例14: UmpireSeFileFilter
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/** Accepts files whose name ends with {@code EXT}, compared case-insensitively. */
public UmpireSeFileFilter() {
super(FileFilterUtils.suffixFileFilter(EXT, IOCase.INSENSITIVE));
}
示例15: BMSuffixFileFilter
import org.apache.commons.io.filefilter.FileFilterUtils; //导入方法依赖的package包/类
/**
 * Case-insensitive suffix file filter carrying a display name and description.
 *
 * @param ext file-name suffix to accept (matched case-insensitively)
 * @param shortDesc short display name for the filter
 * @param desc longer description of the filter
 */
public BMSuffixFileFilter(String ext, String shortDesc, String desc) {
    super(FileFilterUtils.suffixFileFilter(ext, IOCase.INSENSITIVE));
    // Independent field assignments; order is irrelevant.
    this.shortDesc = shortDesc;
    this.desc = desc;
    this.ext = ext;
}