

Java FileSystem.isDirectory Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.isDirectory. If you are unsure what FileSystem.isDirectory does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.FileSystem.


The 15 code examples of FileSystem.isDirectory below are drawn from open-source projects and are sorted by popularity.
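
Before the curated examples, here is a minimal, self-contained sketch of the method: fs.isDirectory(path) returns true only if the path exists and refers to a directory, and returns false for regular files and non-existent paths alike. Recent Hadoop releases mark FileSystem.isDirectory(Path) as deprecated in favor of getFileStatus(Path).isDirectory(), so both forms are shown; the path /tmp/example is just a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsDirectoryDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example"); // placeholder path

        // Deprecated convenience form: false for files and missing paths alike.
        boolean isDir = fs.isDirectory(path);

        // Non-deprecated form: getFileStatus throws FileNotFoundException for
        // missing paths, so existence is checked first.
        boolean isDirToo = fs.exists(path) && fs.getFileStatus(path).isDirectory();

        System.out.println(path + " is a directory: " + isDir + " / " + isDirToo);
    }
}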

Example 1: setup

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    String BPath = context.getConfiguration().get("mpath");
    Bw = context.getConfiguration().getInt("mw", -1);
    Bh = context.getConfiguration().getInt("mh", -1);
    prefix = context.getConfiguration().get("prefix", "");

    Path pt = new Path(BPath);
    Configuration conf = new Configuration();
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem fs = FileSystem.get(conf);

    if (fs.isDirectory(pt)) {
        B = readMatrixFromOutput(pt, Bh, Bw);
    } else {
        B = new double[Bh][Bw];
        readMatrixFromFile(fs, pt, B);
    }

}
 
Developer: Romm17, Project: MRNMF, Lines: 21, Source: MM1.java

Example 2: cd

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@CliCommand(value = "cd", help = "Changes current dir")
public String cd(@CliOption(key = {""}, help = "cd [<path>]") String newDir) {
    if (StringUtils.isEmpty(newDir)) {
        newDir = getHomeDir();
    }

    final Path path = (newDir.startsWith("/")) ? new Path(newDir) : new Path(getCurrentDir(), newDir);
    try {
        final FileSystem fs = getFileSystem();
        if (fs.exists(path) && fs.isDirectory(path)) {
            currentDir = path.toUri().getPath();
        } else {
            return "-shell: cd: " + newDir + " No such file or directory";
        }
    } catch (Exception e) {
        return "Change directory failed! " + e.getMessage();
    }
    return "";
}
 
Developer: avast, Project: hdfs-shell, Lines: 20, Source: ContextCommands.java
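
A design note on this example: fs.exists(path) followed by fs.isDirectory(path) costs two NameNode round trips, and the answer can change between them. A single getFileStatus call yields the same information in one call; a minimal sketch of that variant follows (only the Hadoop API names are real, the surrounding fields come from the example above, and java.io.FileNotFoundException is assumed to be imported):

try {
    final FileSystem fs = getFileSystem();
    if (fs.getFileStatus(path).isDirectory()) {
        currentDir = path.toUri().getPath();
    } else {
        return "-shell: cd: " + newDir + ": Not a directory";
    }
} catch (FileNotFoundException e) {
    // getFileStatus throws instead of returning false for missing paths
    return "-shell: cd: " + newDir + ": No such file or directory";
} catch (Exception e) {
    return "Change directory failed! " + e.getMessage();
}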

Example 3: main

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public static void main(String []args) {
  try (final BufferAllocator bufferAllocator = new RootAllocator(SabotConfig.getMaxDirectMemory())) {
    final Path tableDir  = new Path(args[0]);
    final FileSystem fs = tableDir.getFileSystem(new Configuration());
    if (fs.exists(tableDir) && fs.isDirectory(tableDir)) {
      Map<ColumnDescriptor, Path> dictionaryEncodedColumns = createGlobalDictionaries(fs, tableDir, bufferAllocator).getColumnsToDictionaryFiles();
      long version = getDictionaryVersion(fs, tableDir);
      Path dictionaryRootDir = getDictionaryVersionedRootPath(fs, tableDir, version);
      for (ColumnDescriptor columnDescriptor: dictionaryEncodedColumns.keySet()) {
        final VectorContainer data = readDictionary(fs, dictionaryRootDir, columnDescriptor, bufferAllocator);
        System.out.println("Dictionary for column [" + columnDescriptor.toString() + " size " + data.getRecordCount());
        BatchPrinter.printBatch(data);
        data.clear();
      }
    }
  } catch (IOException ioe) {
    logger.error("Failed ", ioe);
  }
}
 
Developer: dremio, Project: dremio-oss, Lines: 20, Source: GlobalDictionaryBuilder.java

Example 4: scanForDictionaryEncodedColumns

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Check if any columns are dictionary encoded by looking for .dict files
 * @param fs filesystem
 * @param selectionRoot root of table
 * @param batchSchema schema for this parquet table
 */
public static DictionaryEncodedColumns scanForDictionaryEncodedColumns(FileSystem fs, String selectionRoot, BatchSchema batchSchema) {
  try {
    Path root = new Path(selectionRoot);
    if (!fs.isDirectory(root)) {
      root = root.getParent();
    }
    long version = GlobalDictionaryBuilder.getDictionaryVersion(fs, root);
    if (version != -1) {
      final List<String> columns = Lists.newArrayList();
      final DictionaryEncodedColumns dictionaryEncodedColumns = new DictionaryEncodedColumns();
      root = GlobalDictionaryBuilder.getDictionaryVersionedRootPath(fs, root, version);
      for (Field field : batchSchema.getFields()) {
        final Path dictionaryFilePath = GlobalDictionaryBuilder.getDictionaryFile(fs, root, field.getName());
        if (dictionaryFilePath != null) {
          columns.add(field.getName());
        }
      }
      if (!columns.isEmpty()) {
        dictionaryEncodedColumns.setVersion(version);
        dictionaryEncodedColumns.setRootPath(root.toString());
        dictionaryEncodedColumns.setColumnsList(columns);
        return dictionaryEncodedColumns;
      }
    }
  } catch (UnsupportedOperationException e) { // class path based filesystem doesn't support listing
    if (!ClassPathFileSystem.SCHEME.equals(fs.getUri().getScheme())) {
      throw e;
    }
  } catch (IOException ioe) {
    logger.warn(format("Failed to scan directory %s for global dictionary", selectionRoot), ioe);
  }
  return null;
}
 
Developer: dremio, Project: dremio-oss, Lines: 40, Source: ParquetFormatPlugin.java

Example 5: readKeysToSearch

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
static SortedSet<byte []> readKeysToSearch(final Configuration conf)
throws IOException, InterruptedException {
  Path keysInputDir = new Path(conf.get(SEARCHER_INPUTDIR_KEY));
  FileSystem fs = FileSystem.get(conf);
  SortedSet<byte []> result = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
  if (!fs.exists(keysInputDir)) {
    throw new FileNotFoundException(keysInputDir.toString());
  }
  if (!fs.isDirectory(keysInputDir)) {
    throw new UnsupportedOperationException("TODO");
  } else {
    RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(keysInputDir, false);
    while(iterator.hasNext()) {
      LocatedFileStatus keyFileStatus = iterator.next();
      // Skip "_SUCCESS" file.
      if (keyFileStatus.getPath().getName().startsWith("_")) continue;
      result.addAll(readFileToSearch(conf, fs, keyFileStatus));
    }
  }
  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: IntegrationTestBigLinkedList.java

Example 6: addInpuPath

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public static void addInpuPath(Job job, Path path) throws IOException {
    FileSystem fs = path.getFileSystem(new Configuration());
    if (fs.isDirectory(path)) {
        for (Path p : FileUtil.stat2Paths(fs.listStatus(path))) {
            if (p.toString().contains("part"))
                FileInputFormat.addInputPath(job, p);
        }
    } else {
        FileInputFormat.addInputPath(job, path);
    }
}
 
Developer: Romm17, Project: MRNMF, Lines: 12, Source: MatrixUpdater.java

Example 7: generateAndLogErrorListing

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
protected String generateAndLogErrorListing(Path src, Path dst) throws
                                                                IOException {
  FileSystem fs = getFileSystem();
  getLog().error(
    "src dir " + ContractTestUtils.ls(fs, src.getParent()));
  String destDirLS = ContractTestUtils.ls(fs, dst.getParent());
  if (fs.isDirectory(dst)) {
    //include the dir into the listing
    destDirLS = destDirLS + "\n" + ContractTestUtils.ls(fs, dst);
  }
  return destDirLS;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 13, Source: AbstractFSContractTestBase.java

Example 8: isDirectory

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Checks whether the given path is a directory.
 *
 * @param fileSystemInfo
 *            file system information
 * @param path
 *            file path
 * @return whether the path is a directory
 */
public static boolean isDirectory(FileSystemInfo fileSystemInfo, String path) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path uri = new Path(path);
	try {
		pathNotExistCheck(path, fs, uri);
		return fs.isDirectory(uri);
	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}
	return false;
}
 
Developer: zhangjunfang, Project: alluxio, Lines: 23, Source: HdfsAndAlluxioUtils_update.java
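
Two caveats worth noting about this utility. First, the catch block swallows the IOException after printing a stack trace, so callers cannot distinguish an I/O failure from a genuine "not a directory" result. Second, closing the FileSystem after every call is only safe if getFileSystem(fileSystemInfo) presumably returns a private instance; by default FileSystem.get() hands out a cached instance shared across the JVM, and closing it breaks every other user unless caching is disabled (as Example 1 does with fs.hdfs.impl.disable.cache).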

Example 9: getAvroSchema

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Get the schema of AVRO files stored in a directory
 */
public static Schema getAvroSchema(Path path, Configuration conf)
    throws IOException {
  FileSystem fs = path.getFileSystem(conf);
  Path fileToTest;
  if (fs.isDirectory(path)) {
    FileStatus[] fileStatuses = fs.listStatus(path, new PathFilter() {
      @Override
      public boolean accept(Path p) {
        String name = p.getName();
        return !name.startsWith("_") && !name.startsWith(".");
      }
    });
    if (fileStatuses.length == 0) {
      return null;
    }
    fileToTest = fileStatuses[0].getPath();
  } else {
    fileToTest = path;
  }

  SeekableInput input = new FsInput(fileToTest, conf);
  DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
  FileReader<GenericRecord> fileReader = DataFileReader.openReader(input, reader);

  Schema result = fileReader.getSchema();
  fileReader.close();
  return result;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 32, Source: AvroUtil.java

Example 10: getDictionaryVersionedRootPath

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public static Path getDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long version) throws IOException {
  final Path dictionaryRootDir = new Path(tableDir, dictionaryRootDirName(version));
  if (version != -1 && fs.exists(dictionaryRootDir) && fs.isDirectory(dictionaryRootDir)) {
    return dictionaryRootDir;
  }
  return null;
}
 
Developer: dremio, Project: dremio-oss, Lines: 8, Source: GlobalDictionaryBuilder.java

Example 11: setOwner

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Set path ownership.
 */
private void setOwner(final FileSystem fs, final Path path, final String user,
    final String group, final boolean recursive) throws IOException {
  if (user != null || group != null) {
    if (recursive && fs.isDirectory(path)) {
      for (FileStatus child : fs.listStatus(path)) {
        setOwner(fs, child.getPath(), user, group, recursive);
      }
    }
    fs.setOwner(path, user, group);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: ExportSnapshot.java

Example 12: setPermission

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Set path permission.
 */
private void setPermission(final FileSystem fs, final Path path, final short filesMode,
    final boolean recursive) throws IOException {
  if (filesMode > 0) {
    FsPermission perm = new FsPermission(filesMode);
    if (recursive && fs.isDirectory(path)) {
      for (FileStatus child : fs.listStatus(path)) {
        setPermission(fs, child.getPath(), filesMode, recursive);
      }
    }
    fs.setPermission(path, perm);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: ExportSnapshot.java
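
Examples 11 and 12 share the same recursive shape: fs.isDirectory gates the descent, children are processed first, and the change is applied to the path itself last. For setPermission in particular this child-first ordering matters, since a non-superuser caller that tightened the parent directory's permissions first could lose the ability to list and update the subtree beneath it.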

Example 13: generateHDFSFile

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Generate a batch of files on HDFS, one block per file.
 * @param blockSize in bytes
 * @param blockCount number of files (one block each) to generate
 * @param dataPath directory to create the files in
 * @return the FileStatus of each generated file
 * @throws IOException
 */
public List<FileStatus> generateHDFSFile (final long blockSize, final long blockCount,
                                          String dataPath, ProgressListener progressListener) throws IOException
{
    Configuration conf = new Configuration();

    conf.setBoolean("dfs.support.append", true);
    FileSystem fs = FileSystem.get(URI.create("hdfs://" +
            ConfigFactory.Instance().getProperty("namenode.host") + ":" +
            ConfigFactory.Instance().getProperty("namenode.port")+ dataPath), conf);

    Path path = new Path(dataPath);
    // Note: the original check used ||, which throws for every input (a
    // non-existent path is also "not a directory"); && matches the intent.
    if (fs.exists(path) && !fs.isDirectory(path))
    {
        throw new IOException("data path exists and is not a directory");
    }

    fs.mkdirs(new Path(dataPath));
    List<FileStatus> statuses = new ArrayList<FileStatus>();
    if (dataPath.charAt(dataPath.length()-1) != '/' && dataPath.charAt(dataPath.length()-1) != '\\')
    {
        dataPath += "/";
    }

    // 1 MB buffer
    final int bufferSize = 1 * 1024 * 1024;
    byte[] buffer = new byte[bufferSize];
    buffer[0] = 1;
    buffer[1] = 2;
    buffer[2] = 3;

    // number of buffers to write for each block
    long n = blockSize / bufferSize;

    for (int i = 0; i < blockCount; ++i)
    {
        // one block per file
        Path filePath = new Path(dataPath + i);
        FSDataOutputStream out = fs.create(filePath, false, bufferSize, (short) 1, n * bufferSize);

        for (int j = 0; j < n; ++j)
        {
            out.write(buffer);
        }
        out.flush();
        out.close();
        statuses.add(fs.getFileStatus(filePath));
        progressListener.setPercentage(1.0 * i / blockCount);
    }
    return statuses;
}
 
Developer: dbiir, Project: rainbow, Lines: 59, Source: FileGenerator.java

Example 14: setUpBeforeClass

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Start up our mini cluster on top of an 0.92 root.dir that has data from
  // a 0.92 hbase run -- it has a table with 100 rows in it  -- and see if
  // we can migrate from 0.92
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationConvertToPB");
  // Untar our test dir.
  File untar = untar(new File(testdir.toString()));
  // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
  Configuration conf = TEST_UTIL.getConfiguration();
  FsShell shell = new FsShell(conf);
  FileSystem fs = FileSystem.get(conf);
  // find where hbase will root itself, so we can copy filesystem there
  Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
  if (!fs.isDirectory(hbaseRootDir.getParent())) {
    // mkdir at first
    fs.mkdirs(hbaseRootDir.getParent());
  }
  doFsCommand(shell,
    new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});

  // windows fix: tgz file has hbase:meta directory renamed as -META- since the original
  // is an illegal name under windows. So we rename it back.
  // See src/test/data//TestMetaMigrationConvertingToPB.README and
  // https://issues.apache.org/jira/browse/HBASE-6821
  doFsCommand(shell, new String [] {"-mv", new Path(hbaseRootDir, "-META-").toString(),
    new Path(hbaseRootDir, ".META.").toString()});
  // See what's in minihdfs.
  doFsCommand(shell, new String [] {"-lsr", "/"});

  //upgrade to namespace as well
  Configuration toolConf = TEST_UTIL.getConfiguration();
  conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
  ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});

  TEST_UTIL.startMiniHBaseCluster(1, 1);
  // Assert we are running against the copied-up filesystem.  The copied-up
  // rootdir should have had a table named 'TestTable' in it.  Assert it
  // present.
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
  ResultScanner scanner = t.getScanner(new Scan());
  int count = 0;
  while (scanner.next() != null) {
    count++;
  }
  // Assert that we find all 100 rows that are in the data we loaded.  If
  // so then we must have migrated it from 0.90 to 0.92.
  Assert.assertEquals(ROW_COUNT, count);
  scanner.close();
  t.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestMetaMigrationConvertingToPB.java

Example 15: pathNotDirectoryCheck

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Checks that the given path in the distributed file system is a directory.
 *
 * @param uri
 *            uri
 * @param fs
 *            FileSystem
 * @param path
 *            path
 * @throws IOException
 */
private static void pathNotDirectoryCheck(String uri, FileSystem fs, Path path) throws IOException {
	if (!fs.isDirectory(path)) {
		throw new RuntimeException(NOT_DIR_EXECEPTION_MSG + uri);
	}
}
 
Developer: zhangjunfang, Project: alluxio, Lines: 17, Source: HdfsAndAlluxioUtils_update.java


Note: The org.apache.hadoop.fs.FileSystem.isDirectory method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's license before distributing or using the code; do not reproduce without permission.