

Python hdfs3.HDFileSystem Class Code Examples

This article collects typical usage examples of the hdfs3.HDFileSystem class in Python. If you are wondering what exactly the HDFileSystem class does, how to use it, or what real code that uses it looks like, the hand-picked class examples below should help.


The following shows 15 code examples of the HDFileSystem class, sorted by popularity by default.
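
Before going through the examples, here is a minimal usage sketch of the class itself. It is not taken from any of the projects below; the host, port, and paths are placeholders for your own cluster.

from hdfs3 import HDFileSystem

# Connect to the HDFS NameNode (adjust host/port to match your cluster).
hdfs = HDFileSystem(host='localhost', port=8020)

# Basic operations: list a directory, write a file, read it back.
print(hdfs.ls('/tmp'))
with hdfs.open('/tmp/hello.txt', 'wb') as f:
    f.write(b'hello from hdfs3')
with hdfs.open('/tmp/hello.txt', 'rb') as f:
    print(f.read())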

Example 1: read_block_from_hdfs

def read_block_from_hdfs(filename, offset, length, host=None, port=None,
        delimiter=None):
    from hdfs3 import HDFileSystem  # needed for this excerpt to run stand-alone
    from locket import lock_file
    with lock_file('.lock'):
        hdfs = HDFileSystem(host=host, port=port)
        bytes = hdfs.read_block(filename, offset, length, delimiter)
    return bytes
Developer: broxtronix, Project: distributed, Lines of code: 7, Source file: hdfs.py
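
A possible way to call this helper (the file name, offset, and length are made up; assumes a NameNode on the default local port and that locket is installed):

# Hypothetical call: read the first kilobyte of a file, extending to the next newline.
block = read_block_from_hdfs('/tmp/test/file1', 0, 1024,
                             host='localhost', port=8020, delimiter=b'\n')
print(len(block))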

Example 2: test_connection_error

import pytest  # needed for this excerpt to run stand-alone
from hdfs3 import HDFileSystem

def test_connection_error():
    with pytest.raises(RuntimeError) as ctx:
        hdfs = HDFileSystem(host='localhost', port=9999, connect=False)
        hdfs.connect()
    # error message is long and with java exceptions, so here we just check
    # that important part of error is present
    msg = 'Caused by: HdfsNetworkConnectException: Connect to "localhost:9999"'
    assert msg in str(ctx.value)
Developer: bdrosen96, Project: hdfs3, Lines of code: 8, Source file: test_hdfs3.py
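
The connect=False / connect() split exercised by this test can also be used outside of tests, for example to construct the object first and open the connection later (a sketch with a placeholder hostname):

hdfs = HDFileSystem(host='namenode.example.com', port=8020, connect=False)
# ... other setup can happen here without touching the cluster ...
hdfs.connect()  # raises if the NameNode cannot be reached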

Example 3: read_block_from_hdfs

def read_block_from_hdfs(host, port, filename, offset, length, delimiter):
    import sys  # needed for the version check below
    from hdfs3 import HDFileSystem
    if sys.version_info[0] == 2:
        from locket import lock_file
        with lock_file('.lock'):
            hdfs = HDFileSystem(host=host, port=port)
            bytes = hdfs.read_block(filename, offset, length, delimiter)
    else:
        hdfs = HDFileSystem(host=host, port=port)
        bytes = hdfs.read_block(filename, offset, length, delimiter)
    return bytes
Developer: kevineriklee, Project: distributed, Lines of code: 11, Source file: hdfs.py

Example 4: hdfs

from hdfs3 import HDFileSystem  # needed for this excerpt to run stand-alone

def hdfs():
    hdfs = HDFileSystem(host='localhost', port=8020)
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
    hdfs.mkdir('/tmp/test')

    yield hdfs

    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
Developer: bdrosen96, Project: hdfs3, Lines of code: 10, Source file: test_hdfs3.py

Example 5: make_hdfs

from hdfs3 import HDFileSystem  # needed for this excerpt to run stand-alone

def make_hdfs():
    hdfs = HDFileSystem(host='localhost', port=8020)
    if hdfs.exists('/tmp/test'):
        hdfs.rm('/tmp/test')
    hdfs.mkdir('/tmp/test')

    try:
        yield hdfs
    finally:
        if hdfs.exists('/tmp/test'):
            hdfs.rm('/tmp/test')
Developer: minrk, Project: distributed, Lines of code: 11, Source file: test_hdfs.py
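
The yield wrapped in try/finally suggests this function is meant to be used through contextlib.contextmanager (the decorator itself is not part of the excerpt). A sketch of how a test might then use it, with a hypothetical test body:

from contextlib import contextmanager

make_hdfs = contextmanager(make_hdfs)

def test_roundtrip():
    with make_hdfs() as hdfs:
        with hdfs.open('/tmp/test/data', 'wb') as f:
            f.write(b'123')
        assert hdfs.exists('/tmp/test/data')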

Example 6: __init__

  def __init__(self):
    """Initializes a connection to HDFS.

    Connection configuration is done using :doc:`hdfs`.
    """
    super(HadoopFileSystem, self).__init__()
    self._hdfs_client = HDFileSystem()
Developer: aaltay, Project: incubator-beam, Lines of code: 7, Source file: hadoopfilesystem.py

Example 7: make_hdfs

def make_hdfs():
    from hdfs3 import HDFileSystem
    # from .hdfs import DaskHDFileSystem
    basedir = '/tmp/test-distributed'
    hdfs = HDFileSystem(host='localhost', port=8020)
    if hdfs.exists(basedir):
        hdfs.rm(basedir)
    hdfs.mkdir(basedir)

    try:
        yield hdfs, basedir
    finally:
        if hdfs.exists(basedir):
            hdfs.rm(basedir)
Developer: dask, Project: distributed, Lines of code: 14, Source file: utils_test.py

Example 8: get_block_locations

 def get_block_locations(self, paths):
     offsets = []
     lengths = []
     machines = []
     for path in paths:
         if path.startswith('hdfs://'):
             path = path[len('hdfs://'):]
         out = HDFileSystem.get_block_locations(self, path)
         offsets.append([o['offset'] for o in out])
         lengths.append([o['length'] for o in out])
         machines.append([o['hosts'] for o in out])
     return offsets, lengths, machines
Developer: dask, Project: distributed, Lines of code: 12, Source file: hdfs.py
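
For reference, the underlying HDFileSystem.get_block_locations call that this wrapper batches returns one dict per HDFS block; a small sketch against a hypothetical path:

from hdfs3 import HDFileSystem

hdfs = HDFileSystem(host='localhost', port=8020)
# The base-class method describes a single file; the wrapper above batches this
# over several paths and strips the 'hdfs://' scheme first.
for block in hdfs.get_block_locations('/tmp/test/file1'):
    print(block['offset'], block['length'], block['hosts'])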

Example 9: hdfs_open_file

def hdfs_open_file(path, auth):
    from hdfs3 import HDFileSystem  # needed for this excerpt to run stand-alone
    hdfs = HDFileSystem(**auth)
    return hdfs.open(path, mode='rb')
Developer: broxtronix, Project: distributed, Lines of code: 3, Source file: hdfs.py
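
The auth mapping is simply the keyword arguments accepted by HDFileSystem; a hypothetical call:

auth = {'host': 'localhost', 'port': 8020}   # placeholder connection details
f = hdfs_open_file('/tmp/test/file1', auth)  # file-like object opened in 'rb' mode
data = f.read()
f.close()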

Example 10: HDFileSystem

from hdfs3 import HDFileSystem
import os

os.environ['HADOOP_USER_NAME'] = 'hadoop'

hdfs = HDFileSystem(host='trevally.amer.nevint.com', port=9000)

print(hdfs.ls('/user/hadoop'))
Developer: blueskywalker, Project: junkyard, Lines of code: 8, Source file: hdfs3.py

Example 11: __init__

 def __init__(self, **kwargs):
     kwargs2 = {k: v for k, v in kwargs.items()
                if k in ['host', 'port', 'user', 'ticket_cache',
                         'token', 'pars']}
     HDFileSystem.__init__(self, connect=True, **kwargs2)
Developer: dask, Project: distributed, Lines of code: 5, Source file: hdfs.py
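
Because of the whitelist, unrelated keyword arguments are silently dropped before reaching HDFileSystem. A sketch of what instantiation might look like; the class name and import path are inferred from the commented import in Example 7 and are not shown in this excerpt:

from distributed.hdfs import DaskHDFileSystem  # module path inferred from Example 7's commented import

fs = DaskHDFileSystem(host='localhost', port=8020,
                      some_unrelated_option=True)  # not in the whitelist above, so it is dropped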

Example 12: import

import pytest  # needed for the importorskip call below

from tornado import gen

from dask.imperative import Value

from distributed.utils_test import gen_cluster, cluster, loop, make_hdfs
from distributed.utils import get_ip
from distributed.hdfs import (read_bytes, get_block_locations, write_bytes,
        _read_csv, read_csv)
from distributed import Executor
from distributed.executor import _wait, Future


pytest.importorskip('hdfs3')
from hdfs3 import HDFileSystem
try:
    hdfs = HDFileSystem(host='localhost', port=8020)
    hdfs.df()
    del hdfs
except:
    pytestmark = pytest.mark.skipif('True')


ip = get_ip()


def test_get_block_locations():
    with make_hdfs() as hdfs:
        data = b'a' * int(1e8)  # todo: reduce block size to speed up test
        fn_1 = '/tmp/test/file1'
        fn_2 = '/tmp/test/file2'
Developer: kevineriklee, Project: distributed, Lines of code: 30, Source file: test_hdfs.py

Example 13: open

 def open(self, path, mode='rb', **kwargs):
     if path.startswith('hdfs://'):
         path = path[len('hdfs://'):]
     return HDFileSystem.open(self, path, mode, **kwargs)
Developer: dask, Project: distributed, Lines of code: 4, Source file: hdfs.py
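
With this override a path can be passed with or without the hdfs:// scheme; a hypothetical usage via the same inferred subclass as in the sketch after Example 11:

fs = DaskHDFileSystem(host='localhost', port=8020)   # class name inferred, see Example 11's sketch
with fs.open('hdfs:///tmp/test/file1') as f:         # the 'hdfs://' prefix is stripped before opening
    data = f.read()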

Example 14: hdfs_open_file

def hdfs_open_file(path, auth):
    from hdfs3 import HDFileSystem
    hdfs = HDFileSystem(**auth)
    return hdfs.open(path, mode='rb')
Developer: frol, Project: distributed, Lines of code: 4, Source file: hdfs.py

Example 15: open

    def open(self, path, mode='rb'):
        mode = mode.rstrip('b')
        return open(path, mode)

    def put(self, src, dst):
        return shutil.copy(src, dst)

if __name__ == "__main__":
    # Assumes module-level imports of yaml, shutil, and hdfs3.HDFileSystem, plus
    # the TransparentFileSystem class whose methods are excerpted above.
    # Load the HDFS NameNode info from a local YAML config file.
    with open('hdfs.yml', 'r') as f:
        data = yaml.safe_load(f)

    hdfs_nn = data['hdfs_nn']
    hdfs = HDFileSystem(host=hdfs_nn, port=data['hdfs_port'])

    tfs = TransparentFileSystem(hdfs)
    print(hdfs.exists('/tmp'))
    # print(hdfs.hoge('/tmp'))
    print(tfs.exists('/tmp'))
    # print(tfs.hoge('/tmp'))

    # tfs_local = TransparentFileSystem()
    # print tfs_local.glob('/var/tmp')

    print('test')
    print(tfs.glob('/tmp'))
    # tfs.hoge()
    tfs_local = TransparentFileSystem()
    # print(tfs_local.glob('/home/vagrant/work/data/*'))
Developer: hirolovesbeer, Project: tfs, Lines of code: 30, Source file: hdfs_wrapper.py


Note: The hdfs3.HDFileSystem class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's license. Please do not republish without permission.