This page collects typical usage examples of the Python method hdfs3.HDFileSystem.exists. If you have been asking yourself what exactly HDFileSystem.exists does and how to use it, the curated method examples below may help. You can also browse the containing class, hdfs3.HDFileSystem, for further usage examples.
Five code examples of HDFileSystem.exists are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
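Before the numbered examples, here is a minimal, self-contained sketch of the method in isolation. The host, port, and path are placeholder assumptions for a local test cluster; adjust them to your environment.

from hdfs3 import HDFileSystem

# Connect to the namenode (localhost:8020 is an assumed local test setup).
hdfs = HDFileSystem(host='localhost', port=8020)

# exists() returns True when the given file or directory is present.
if not hdfs.exists('/tmp/example'):
    hdfs.mkdir('/tmp/example')
print(hdfs.exists('/tmp/example'))  # True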
Example 1: hdfs
# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
def hdfs():
    hdfs = HDFileSystem(host='localhost', port=8020)
    # Ensure the test directory starts out empty.
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
    hdfs.mkdir('/tmp/test')

    yield hdfs

    # Clean up after the consumer is done with the filesystem.
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
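The bare yield in Example 1 suggests the body of a pytest fixture (an assumption; the decorator is not shown in the listing). A sketch of how such a fixture would typically be declared and consumed; the test function test_exists is hypothetical:

import pytest
from hdfs3 import HDFileSystem

@pytest.fixture
def hdfs():
    # Same setup/teardown as Example 1; the test runs at the yield.
    hdfs = HDFileSystem(host='localhost', port=8020)
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
    hdfs.mkdir('/tmp/test')
    yield hdfs
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')

def test_exists(hdfs):
    # The fixture guarantees /tmp/test exists and starts out empty.
    hdfs.touch('/tmp/test/data.txt')
    assert hdfs.exists('/tmp/test/data.txt')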
Example 2: make_hdfs
# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
def make_hdfs():
    hdfs = HDFileSystem(host='localhost', port=8020)
    # Ensure the test directory starts out empty.
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
    hdfs.mkdir('/tmp/test')
    try:
        yield hdfs
    finally:
        # Remove the directory even if the consumer raised.
        if hdfs.exists('/tmp/test'):
            hdfs.rm('/tmp/test')
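The try/finally around the yield in Example 2 matches the shape contextlib.contextmanager expects, so the generator was presumably consumed through it (an assumption; the decoration is not shown in the listing). A minimal sketch:

from contextlib import contextmanager

# Assumed decoration of the make_hdfs generator from Example 2.
make_hdfs = contextmanager(make_hdfs)

# /tmp/test exists inside the block and is removed on exit,
# even if the body raises.
with make_hdfs() as hdfs:
    assert hdfs.exists('/tmp/test')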
Example 3: make_hdfs
# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
def make_hdfs():
    from hdfs3 import HDFileSystem
    # from .hdfs import DaskHDFileSystem
    basedir = '/tmp/test-distributed'
    hdfs = HDFileSystem(host='localhost', port=8020)
    # Ensure the base directory starts out empty.
    if hdfs.exists(basedir):
        hdfs.rm(basedir)
    hdfs.mkdir(basedir)
    try:
        yield hdfs, basedir
    finally:
        if hdfs.exists(basedir):
            hdfs.rm(basedir)
Example 4: open
# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
    # (The listing begins mid-class; the enclosing method definition is omitted.)
        return open(path, mode)

    def put(self, src, dst):
        return shutil.copy(src, dst)

if __name__ == "__main__":
    # Load the HDFS namenode info from a YAML config file.
    with open('hdfs.yml', 'r') as f:
        data = yaml.safe_load(f)  # safe_load avoids executing arbitrary YAML tags
    hdfs_nn = data['hdfs_nn']
    hdfs = HDFileSystem(host=hdfs_nn, port=data['hdfs_port'])
    tfs = TransparentFileSystem(hdfs)
    print(hdfs.exists('/tmp'))
    # print(hdfs.hoge('/tmp'))
    print(tfs.exists('/tmp'))
    # print(tfs.hoge('/tmp'))
    # tfs_local = TransparentFileSystem()
    # print(tfs_local.glob('/var/tmp'))
    print('test')
    print(tfs.glob('/tmp'))
    # tfs.hoge()
    tfs_local = TransparentFileSystem()
    # print(tfs_local.glob('/home/vagrant/work/data/*'))
    # tfs_local.hoge()
    # print(tfs.hoge())
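The listing for Example 4 begins mid-class, so the TransparentFileSystem wrapper itself is never shown. A hedged reconstruction of what such a wrapper might look like, inferred from the calls made in the __main__ block; every method body here is an assumption:

import glob as local_glob
import os
import shutil

class TransparentFileSystem(object):
    # Hypothetical sketch: delegate to an HDFileSystem when one is
    # supplied, otherwise fall back to the local filesystem.
    def __init__(self, hdfs=None):
        self.hdfs = hdfs

    def exists(self, path):
        if self.hdfs is not None:
            return self.hdfs.exists(path)
        return os.path.exists(path)

    def glob(self, pattern):
        if self.hdfs is not None:
            return self.hdfs.glob(pattern)
        return local_glob.glob(pattern)

    def open(self, path, mode='rb'):
        if self.hdfs is not None:
            return self.hdfs.open(path, mode)
        return open(path, mode)

    def put(self, src, dst):
        if self.hdfs is not None:
            return self.hdfs.put(src, dst)
        return shutil.copy(src, dst)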
Example 5: HadoopFileSystem
# Required import: from hdfs3 import HDFileSystem [as alias]
# Or: from hdfs3.HDFileSystem import exists [as alias]
class HadoopFileSystem(FileSystem):
  """``FileSystem`` implementation that supports HDFS.

  URL arguments to methods expect strings starting with ``hdfs://``.

  Uses client library :class:`hdfs3.core.HDFileSystem`.
  """

  def __init__(self):
    """Initializes a connection to HDFS.

    Connection configuration is done using :doc:`hdfs`.
    """
    super(HadoopFileSystem, self).__init__()
    self._hdfs_client = HDFileSystem()

  @classmethod
  def scheme(cls):
    return 'hdfs'

  @staticmethod
  def _parse_url(url):
    """Verifies that url begins with hdfs:// prefix, strips it and adds a
    leading /.

    Raises:
      ValueError if url doesn't begin with hdfs://.

    Args:
      url: A URL in the form hdfs://path/...

    Returns:
      For an input of 'hdfs://path/...', will return '/path/...'.
    """
    m = _URL_RE.match(url)
    if m is None:
      raise ValueError('Could not parse url: %s' % url)
    return m.group(1)

  def join(self, base_url, *paths):
    """Join two or more pathname components.

    Args:
      base_url: string path of the first component of the path.
        Must start with hdfs://.
      paths: path components to be added

    Returns:
      Full url after combining all the passed components.
    """
    basepath = self._parse_url(base_url)
    return _HDFS_PREFIX + self._join(basepath, *paths)

  def _join(self, basepath, *paths):
    return posixpath.join(basepath, *paths)

  def split(self, url):
    rel_path = self._parse_url(url)
    head, tail = posixpath.split(rel_path)
    return _HDFS_PREFIX + head, tail

  def mkdirs(self, url):
    path = self._parse_url(url)
    if self._exists(path):
      raise IOError('Path already exists: %s' % path)
    return self._mkdirs(path)

  def _mkdirs(self, path):
    self._hdfs_client.makedirs(path)

  def match(self, url_patterns, limits=None):
    if limits is None:
      limits = [None] * len(url_patterns)

    if len(url_patterns) != len(limits):
      raise BeamIOError(
          'Patterns and limits should be equal in length: %d != %d' % (
              len(url_patterns), len(limits)))

    # TODO(udim): Update client to allow batched results.
    def _match(path_pattern, limit):
      """Find all matching paths to the pattern provided."""
      file_infos = self._hdfs_client.ls(path_pattern, detail=True)[:limit]
      metadata_list = [FileMetadata(file_info['name'], file_info['size'])
                       for file_info in file_infos]
      return MatchResult(path_pattern, metadata_list)

    exceptions = {}
    result = []
    for url_pattern, limit in zip(url_patterns, limits):
      try:
        path_pattern = self._parse_url(url_pattern)
        result.append(_match(path_pattern, limit))
      except Exception as e:  # pylint: disable=broad-except
        exceptions[url_pattern] = e

    if exceptions:
      raise BeamIOError('Match operation failed', exceptions)
    return result
# ......... (remaining code omitted) .........
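Example 5 depends on module-level names the listing omits: _HDFS_PREFIX and _URL_RE, plus Beam's FileSystem, BeamIOError, FileMetadata, and MatchResult (importable from apache_beam.io.filesystem in the Beam versions that shipped this hdfs3-based class). A hedged sketch of the two omitted constants, inferred from how _parse_url, join, and split use them:

import re

# Assumed values: join()/split() prepend _HDFS_PREFIX to paths that
# already begin with '/', yielding 'hdfs://...' URLs.
_HDFS_PREFIX = 'hdfs:/'
# group(1) captures '/path/...' out of 'hdfs://path/...'.
_URL_RE = re.compile(r'^' + _HDFS_PREFIX + r'(/.*)')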