

Python HDFileSystem.exists Method Code Examples

This article collects typical usage examples of the Python method hdfs3.HDFileSystem.exists. If you are wondering what HDFileSystem.exists does, or how and where to use it, the curated code examples below should help. You can also explore further usage examples of the containing class, hdfs3.HDFileSystem.


Five code examples of HDFileSystem.exists are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
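Before walking through the collected examples, here is a minimal sketch of the method itself: HDFileSystem.exists takes an HDFS path string and returns a boolean. The host, port, and path below are illustrative placeholders, not values taken from any specific example.

from hdfs3 import HDFileSystem

# Connect to an HDFS NameNode (host and port are placeholder assumptions).
hdfs = HDFileSystem(host='localhost', port=8020)

# exists() returns True if the path is present in HDFS, False otherwise.
if not hdfs.exists('/tmp/demo'):
    hdfs.mkdir('/tmp/demo')
print(hdfs.exists('/tmp/demo'))  # True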

Example 1: hdfs

# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
def hdfs():
    # Connect to the local NameNode and start from a clean /tmp/test.
    hdfs = HDFileSystem(host='localhost', port=8020)
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
    hdfs.mkdir('/tmp/test')

    yield hdfs

    # Teardown: remove the test directory once the consumer is done.
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
Developer: bdrosen96, Project: hdfs3, Lines of code: 12, Source file: test_hdfs3.py
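The bare yield in Example 1 suggests the function is registered as a pytest fixture in the original test_hdfs3.py: setup runs before the yield, teardown after it. Assuming a @pytest.fixture decorator on hdfs(), a hypothetical test would consume it like this:

# Hypothetical test: pytest injects the yielded HDFileSystem object,
# then resumes hdfs() past the yield afterwards to run the cleanup.
def test_exists_after_mkdir(hdfs):
    hdfs.mkdir('/tmp/test/subdir')
    assert hdfs.exists('/tmp/test/subdir')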

Example 2: make_hdfs

# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
def make_hdfs():
    # Connect and ensure /tmp/test starts empty.
    hdfs = HDFileSystem(host='localhost', port=8020)
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
    hdfs.mkdir('/tmp/test')

    try:
        yield hdfs
    finally:
        # Clean up even if the consumer raised an exception.
        if hdfs.exists('/tmp/test'):
            hdfs.rm('/tmp/test')
Developer: minrk, Project: distributed, Lines of code: 13, Source file: test_hdfs.py
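The try/finally around the yield in Example 2 is the usual shape of a generator wrapped with contextlib.contextmanager, which is presumably how the original source decorates make_hdfs. Under that assumption, callers get guaranteed cleanup of /tmp/test even when the block raises:

# Assuming make_hdfs is decorated with @contextlib.contextmanager:
with make_hdfs() as hdfs:
    hdfs.touch('/tmp/test/file.txt')  # create an empty file
    assert hdfs.exists('/tmp/test/file.txt')
# On exit, the finally block removes /tmp/test.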

Example 3: make_hdfs

# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
def make_hdfs():
    from hdfs3 import HDFileSystem
    # from .hdfs import DaskHDFileSystem
    basedir = '/tmp/test-distributed'
    hdfs = HDFileSystem(host='localhost', port=8020)
    if hdfs.exists(basedir):
        hdfs.rm(basedir)
    hdfs.mkdir(basedir)

    try:
        yield hdfs, basedir
    finally:
        if hdfs.exists(basedir):
            hdfs.rm(basedir)
Developer: dask, Project: distributed, Lines of code: 16, Source file: utils_test.py
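Example 3 differs from Example 2 only in that it yields a (filesystem, base directory) pair. Assuming the same @contextmanager wrapping, the caller unpacks both values:

# Assuming @contextlib.contextmanager, as in Example 2:
with make_hdfs() as (hdfs, basedir):
    hdfs.touch(basedir + '/part-0.txt')
    assert hdfs.exists(basedir + '/part-0.txt')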

Example 4: open

# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
        # (Excerpt: the enclosing class definition and this method's
        # signature are truncated in the original snippet.)
        return open(path, mode)

    def put(self, src, dst):
        return shutil.copy(src, dst)

if __name__ == "__main__":
    # Load the HDFS NameNode info from a YAML config file.
    # safe_load replaces the original bare yaml.load(f), which modern
    # PyYAML rejects without an explicit Loader argument.
    with open('hdfs.yml', 'r') as f:
        data = yaml.safe_load(f)

    hdfs_nn = data['hdfs_nn']
    hdfs = HDFileSystem(host=hdfs_nn, port=data['hdfs_port'])

    tfs = TransparentFileSystem(hdfs)
    print(hdfs.exists('/tmp'))  # query HDFS directly
    # print(hdfs.hoge('/tmp'))
    print(tfs.exists('/tmp'))   # query through the wrapper
    # print(tfs.hoge('/tmp'))

    # tfs_local = TransparentFileSystem()
    # print(tfs_local.glob('/var/tmp'))

    print('test')
    print(tfs.glob('/tmp'))
    # tfs.hoge()
    tfs_local = TransparentFileSystem()
    # print(tfs_local.glob('/home/vagrant/work/data/*'))
    # tfs_local.hoge()

    # print(tfs.hoge())
Developer: hirolovesbeer, Project: tfs, Lines of code: 33, Source file: hdfs_wrapper.py
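Example 4 is excerpted mid-class, so the definition of TransparentFileSystem never appears. Judging from its usage, it forwards calls such as exists and glob to an hdfs3 client when one is supplied, and to local-filesystem equivalents otherwise. The following is a hypothetical minimal reconstruction of that dispatch idea, not the project's actual code:

import glob as local_glob
import os

class TransparentFileSystem(object):
    """Sketch: route calls to an HDFS client if given, else to the local FS."""

    def __init__(self, fs=None):
        self._fs = fs  # e.g. an hdfs3.HDFileSystem, or None for local paths

    def exists(self, path):
        if self._fs is not None:
            return self._fs.exists(path)
        return os.path.exists(path)

    def glob(self, pattern):
        if self._fs is not None:
            return self._fs.glob(pattern)
        return local_glob.glob(pattern)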

Example 5: HadoopFileSystem

# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
class HadoopFileSystem(FileSystem):
  """``FileSystem`` implementation that supports HDFS.

  URL arguments to methods expect strings starting with ``hdfs://``.

  Uses client library :class:`hdfs3.core.HDFileSystem`.
  """

  def __init__(self):
    """Initializes a connection to HDFS.

    Connection configuration is done using :doc:`hdfs`.
    """
    super(HadoopFileSystem, self).__init__()
    self._hdfs_client = HDFileSystem()

  @classmethod
  def scheme(cls):
    return 'hdfs'

  @staticmethod
  def _parse_url(url):
    """Verifies that url begins with hdfs:// prefix, strips it and adds a
    leading /.

    Raises:
      ValueError if url doesn't begin with hdfs://.

    Args:
      url: A URL in the form hdfs://path/...

    Returns:
      For an input of 'hdfs://path/...', will return '/path/...'.
    """
    m = _URL_RE.match(url)
    if m is None:
      raise ValueError('Could not parse url: %s' % url)
    return m.group(1)

  def join(self, base_url, *paths):
    """Join two or more pathname components.

    Args:
      base_url: string path of the first component of the path.
        Must start with hdfs://.
      paths: path components to be added

    Returns:
      Full url after combining all the passed components.
    """
    basepath = self._parse_url(base_url)
    return _HDFS_PREFIX + self._join(basepath, *paths)

  def _join(self, basepath, *paths):
    return posixpath.join(basepath, *paths)

  def split(self, url):
    rel_path = self._parse_url(url)
    head, tail = posixpath.split(rel_path)
    return _HDFS_PREFIX + head, tail

  def mkdirs(self, url):
    path = self._parse_url(url)
    if self._exists(path):
      raise IOError('Path already exists: %s' % path)
    return self._mkdirs(path)

  def _mkdirs(self, path):
    self._hdfs_client.makedirs(path)

  def match(self, url_patterns, limits=None):
    if limits is None:
      limits = [None] * len(url_patterns)

    if len(url_patterns) != len(limits):
      raise BeamIOError(
          'Patterns and limits should be equal in length: %d != %d' % (
              len(url_patterns), len(limits)))

    # TODO(udim): Update client to allow batched results.
    def _match(path_pattern, limit):
      """Find all matching paths to the pattern provided."""
      file_infos = self._hdfs_client.ls(path_pattern, detail=True)[:limit]
      metadata_list = [FileMetadata(file_info['name'], file_info['size'])
                       for file_info in file_infos]
      return MatchResult(path_pattern, metadata_list)

    exceptions = {}
    result = []
    for url_pattern, limit in zip(url_patterns, limits):
      try:
        path_pattern = self._parse_url(url_pattern)
        result.append(_match(path_pattern, limit))
      except Exception as e:  # pylint: disable=broad-except
        exceptions[url_pattern] = e

    if exceptions:
      raise BeamIOError('Match operation failed', exceptions)
    return result

#......... remainder of the code omitted .........
Developer: aaltay, Project: incubator-beam, Lines of code: 103, Source file: hadoopfilesystem.py
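The snippet references _HDFS_PREFIX and _URL_RE without showing their definitions; they live in the omitted part of hadoopfilesystem.py. A plausible reconstruction, consistent with the _parse_url docstring ('hdfs://path/...' maps to '/path/...') and with split() prepending _HDFS_PREFIX to a leading-slash path, is:

import re

# Assumed definitions, inferred from the docstring above: stripping the
# 'hdfs:/' prefix from 'hdfs://path/...' leaves '/path/...'.
_HDFS_PREFIX = 'hdfs:/'
_URL_RE = re.compile(r'^' + _HDFS_PREFIX + r'(/.*)')

assert _URL_RE.match('hdfs://tmp/data').group(1) == '/tmp/data'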


Note: The hdfs3.HDFileSystem.exists examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or using the code. Do not reproduce this page without permission.