

Java FileSystem.isFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.isFile. If you are wondering what FileSystem.isFile does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.FileSystem.


The following presents 15 code examples of the FileSystem.isFile method, sorted by popularity by default.
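
Before diving into the examples, here is a minimal, self-contained sketch of what FileSystem.isFile does. This is not taken from any of the projects below; the namenode address and the path are placeholders you would adjust for your own cluster.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder namenode address; adjust for your cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
    Path p = new Path("/tmp/example.txt");
    // isFile returns true only if the path exists and is a regular file;
    // it returns false (rather than throwing) when the path is missing.
    System.out.println(fs.isFile(p));
    fs.close();
  }
}

Note that on newer Hadoop releases isFile(Path) is deprecated; the equivalent check goes through fs.getFileStatus(p).isFile() instead.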

Example 1: open

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  registerCurrentStream(outStream, hdfs, dstPath);
}
 
Developer ID: Transwarp-DE, Project: Transwarp-Sample-Code, Lines of code: 23, Source file: HDFSSequenceFile.java
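
The append-or-create idiom in this example recurs throughout the Flume sinks below. Distilled into a standalone helper, it looks roughly like the following sketch; the helper name openForWrite is hypothetical and does not appear in any of the projects cited here.

private static FSDataOutputStream openForWrite(Configuration conf,
    FileSystem fs, Path dstPath) throws IOException {
  // Append only when the configuration advertises append support and the
  // destination already exists as a regular file; otherwise create it.
  if (conf.getBoolean("hdfs.append.support", false) && fs.isFile(dstPath)) {
    return fs.append(dstPath);
  }
  return fs.create(dstPath);
}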

Example 2: open

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  registerCurrentStream(outStream, hdfs, dstPath);
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 22, Source file: HDFSSequenceFile.java

Example 3: ParquetMetadataStat

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * @param nameNode the hostname of the HDFS namenode
 * @param hdfsPort the port of the HDFS namenode, usually 9000 or 8020
 * @param dirPath the path of the directory containing the parquet files, beginning with /, e.g. /msra/column/order/parquet/
 * @throws IOException
 * @throws MetadataException
 */
public ParquetMetadataStat(String nameNode, int hdfsPort, String dirPath) throws IOException, MetadataException
{
    Configuration conf = new Configuration();
    FileSystem fileSystem = FileSystem.get(URI.create("hdfs://" + nameNode + ":" + hdfsPort), conf);
    Path hdfsDirPath = new Path(dirPath);
    if (! fileSystem.isFile(hdfsDirPath))
    {
        FileStatus[] fileStatuses = fileSystem.listStatus(hdfsDirPath);
        for (FileStatus status : fileStatuses)
        {
            // compatibility for HDFS 1.x
            if (! status.isDir())
            {
                //System.out.println(status.getPath().toString());
                this.fileMetaDataList.add(new ParquetFileMetadata(conf, status.getPath()));
            }
        }
    }
    if (this.fileMetaDataList.size() == 0)
    {
        throw new MetadataException("fileMetaDataList is empty: path is not a directory or it contains no files.");
    }
    this.fields = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFields();
    this.columnCount = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFieldCount();
}
 
Developer ID: dbiir, Project: rainbow, Lines of code: 34, Source file: ParquetMetadataStat.java
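
A side note on the compatibility check above: FileStatus.isDir() dates from HDFS 1.x and is deprecated in later releases. On Hadoop 2.x and newer, the same filter can be written positively with FileStatus.isFile(); a minimal sketch under that assumption, reusing the variable names from the example:

for (FileStatus status : fileSystem.listStatus(hdfsDirPath)) {
  if (status.isFile()) {
    // only regular files reach this point; directories are skipped
    this.fileMetaDataList.add(new ParquetFileMetadata(conf, status.getPath()));
  }
}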

Example 4: computeSourceRootPath

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private Path computeSourceRootPath(FileStatus sourceStatus,
                                   DistCpOptions options) throws IOException {

  Path target = options.getTargetPath();
  FileSystem targetFS = target.getFileSystem(getConf());
  final boolean targetPathExists = options.getTargetPathExists();

  boolean solitaryFile = options.getSourcePaths().size() == 1
                                              && !sourceStatus.isDirectory();

  if (solitaryFile) {
    if (targetFS.isFile(target) || !targetPathExists) {
      return sourceStatus.getPath();
    } else {
      return sourceStatus.getPath().getParent();
    }
  } else {
    boolean specialHandling = (options.getSourcePaths().size() == 1 && !targetPathExists) ||
        options.shouldSyncFolder() || options.shouldOverwrite();

    return specialHandling && sourceStatus.isDirectory() ? sourceStatus.getPath() :
        sourceStatus.getPath().getParent();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source file: SimpleCopyListing.java

Example 5: debugLsr

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * ls -r for debugging purposes
 */
public static void debugLsr(Configuration conf,
    Path p, ErrorReporter errors) throws IOException {
  if (!LOG.isDebugEnabled() || p == null) {
    return;
  }
  FileSystem fs = p.getFileSystem(conf);

  if (!fs.exists(p)) {
    // nothing
    return;
  }
  errors.print(p.toString());

  if (fs.isFile(p)) {
    return;
  }

  if (fs.getFileStatus(p).isDirectory()) {
    FileStatus[] fss = fs.listStatus(p);
    for (FileStatus status : fss) {
      debugLsr(conf, status.getPath(), errors);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 28, Source file: HBaseFsck.java
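
Example 5 walks the directory tree manually via isFile and listStatus. On Hadoop 2.x and newer, a recursive file listing can also be obtained with the built-in FileSystem.listFiles iterator; a minimal sketch follows. Note that it yields regular files only, so directory paths are not printed the way they are in the example above.

RemoteIterator<LocatedFileStatus> it = fs.listFiles(p, true /* recursive */);
while (it.hasNext()) {
  LocatedFileStatus status = it.next();
  System.out.println(status.getPath());
}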

Example 6: isLegalHdfsFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Checks whether the file at the given path is valid, i.e. it exists and is readable.
 *
 * @param hdfsFS the FileSystem to check against
 * @param hdfsFilePath the path of the file on HDFS
 * @return true if the file is valid, false otherwise
 */
public static boolean isLegalHdfsFile(FileSystem hdfsFS, String hdfsFilePath) {
    try {
        return hdfsFS.isFile(new Path(hdfsFilePath));
    } catch (IllegalArgumentException iae) {
        return false;
    } catch (IOException e) {
        return false;
    }
}
 
Developer ID: tencentyun, Project: hdfs_to_cos_tools, Lines of code: 16, Source file: CommonHdfsUtils.java

Example 7: isFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Determines whether the file at the given path is a regular file.
 *
 * @param fileSystemInfo
 *            file system information
 * @param path
 *            file path
 * @return whether the path is a regular file
 */
public static boolean isFile(FileSystemInfo fileSystemInfo, String path) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path uri = new Path(path);
	try {
		pathNotExistCheck(path, fs, uri);
		return fs.isFile(uri);
	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}
	return false;
}
 
Developer ID: zhangjunfang, Project: alluxio, Lines of code: 23, Source file: HdfsAndAlluxioUtils_update.java

Example 8: doOpen

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }

  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
    appending = true;
  } else {
    outStream = hdfs.create(dstPath);
  }

  serializer = EventSerializerFactory.getInstance(
      serializerType, serializerContext, outStream);
  if (appending && !serializer.supportsReopen()) {
    outStream.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType +
        ") does not support append");
  }

  // must call superclass to check for replication issues
  registerCurrentStream(outStream, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 37, Source file: HDFSDataStream.java

Example 9: doOpen

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs)
    throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }

  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
    appending = true;
  } else {
    outStream = hdfs.create(dstPath);
  }

  serializer = EventSerializerFactory.getInstance(
      serializerType, serializerContext, outStream);
  if (appending && !serializer.supportsReopen()) {
    outStream.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType +
        ") does not support append");
  }

  // must call superclass to check for replication issues
  registerCurrentStream(outStream, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
}
 
Developer ID: Transwarp-DE, Project: Transwarp-Sample-Code, Lines of code: 40, Source file: HDFSDataStream.java

Example 10: verifyPath

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Verify that given path leads to a file that we can read.
 *
 * @param fs Associated FileSystem
 * @param path Path
 * @throws IOException
 */
protected void verifyPath(FileSystem fs, Path path) throws IOException {
  if (!fs.exists(path)) {
    throw new IOException("The provided password file " + path
      + " does not exist!");
  }

  if (!fs.isFile(path)) {
    throw new IOException("The provided password file " + path
      + " is a directory!");
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 19, Source file: FilePasswordLoader.java

Example 11: recursiveCheckChildPathName

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Recursively check if a given path and its child paths have colons in their
 * names. It returns true if none of them has a colon or this path does not
 * exist, and false otherwise.
 */
private boolean recursiveCheckChildPathName(FileSystem fs, Path p)
    throws IOException {
  if (p == null) {
    return true;
  }
  if (!fs.exists(p)) {
    System.out.println("Path " + p + " does not exist!");
    return true;
  }

  if (fs.isFile(p)) {
    if (containsColon(p)) {
      System.out.println("Warning: file " + p + " has a colon in its name.");
      return false;
    } else {
      return true;
    }
  } else {
    boolean flag;
    if (containsColon(p)) {
      System.out.println("Warning: directory " + p
          + " has a colon in its name.");
      flag = false;
    } else {
      flag = true;
    }
    FileStatus[] listed = fs.listStatus(p);
    for (FileStatus l : listed) {
      if (!recursiveCheckChildPathName(fs, l.getPath())) {
        flag = false;
      }
    }
    return flag;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 41, Source file: WasbFsck.java

Example 12: changeUserGroup

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private static void changeUserGroup(String user, String group)
        throws IOException {
  FileSystem fs = cluster.getFileSystem();
  FsPermission changedPermission = new FsPermission(
          FsAction.ALL, FsAction.ALL, FsAction.ALL
  );
  for (Path path : pathList) {
    if (fs.isFile(path)) {
      fs.setOwner(path, user, group);
      fs.setPermission(path, changedPermission);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source file: TestCopyMapper.java

Example 13: isTableDir

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private static boolean isTableDir(final FileSystem fs, final Path path) throws IOException {
  // Check for the old format of having /table/.tableinfo; hbase:meta doesn't have .tableinfo,
  // so include it.
  if (fs.isFile(path)) return false;
  return (FSTableDescriptors.getTableInfoPath(fs, path) != null || FSTableDescriptors
      .getCurrentTableInfoStatus(fs, path, false) != null) || path.toString().endsWith(".META.");
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 8, Source file: HFileV1Detector.java

Example 14: open

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
public void open(String filePath, CompressionCodec codec,
    CompressionType cType) throws IOException {
  Configuration conf = new Configuration();
  Path dstPath = new Path(filePath);
  FileSystem hdfs = dstPath.getFileSystem(conf);
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    fsOut = hdfs.append(dstPath);
    appending = true;
  } else {
    fsOut = hdfs.create(dstPath);
  }
  if (compressor == null) {
    compressor = CodecPool.getCompressor(codec, conf);
  }
  cmpOut = codec.createOutputStream(fsOut, compressor);
  serializer = EventSerializerFactory.getInstance(serializerType,
      serializerContext, cmpOut);
  if (appending && !serializer.supportsReopen()) {
    cmpOut.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType
        + ") does not support append");
  }

  registerCurrentStream(fsOut, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
  isFinished = false;
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 44, Source file: HDFSCompressedDataStream.java

Example 15: open

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
public void open(String filePath, CompressionCodec codec,
    CompressionType cType) throws IOException {
  Configuration conf = new Configuration();
  Path dstPath = new Path(filePath);
  FileSystem hdfs = dstPath.getFileSystem(conf);
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    fsOut = hdfs.append(dstPath);
    appending = true;
  } else {
    fsOut = hdfs.create(dstPath);
  }
  if (compressor == null) {
    compressor = CodecPool.getCompressor(codec, conf);
  }
  cmpOut = codec.createOutputStream(fsOut, compressor);
  serializer = EventSerializerFactory.getInstance(serializerType,
      serializerContext, cmpOut);
  if (appending && !serializer.supportsReopen()) {
    cmpOut.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType
        + ") does not support append");
  }

  registerCurrentStream(fsOut, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
  isFinished = false;
}
 
Developer ID: Transwarp-DE, Project: Transwarp-Sample-Code, Lines of code: 45, Source file: HDFSCompressedDataStream.java


Note: The org.apache.hadoop.fs.FileSystem.isFile method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not republish without permission.