

Python UPLOAD_CHUNK_SIZE.get Method Code Examples

This article collects typical usage examples of the Python method hadoop.conf.UPLOAD_CHUNK_SIZE.get. If you are wondering what UPLOAD_CHUNK_SIZE.get does or how to call it, the curated examples below may help. You can also explore further usage of hadoop.conf.UPLOAD_CHUNK_SIZE, the configuration option these examples are built on.


Six code examples of the UPLOAD_CHUNK_SIZE.get method are shown below, sorted by popularity by default.
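For context, UPLOAD_CHUNK_SIZE is a Hue configuration option whose current value is read with .get(). The following is a minimal sketch of that pattern, not Hue's actual desktop.lib.conf.Config implementation; the key name and the 64 MB default are illustrative assumptions.

  # Minimal sketch of a Config-style option read with .get().
  # NOT Hue's actual Config class; key name and default are assumptions.
  class Config(object):
    def __init__(self, key, default):
      self.key = key
      self.default = default

    def get(self):
      # Hue would resolve the value from its loaded .ini configuration;
      # this sketch simply falls back to the default.
      return self.default

  UPLOAD_CHUNK_SIZE = Config(key="upload_chunk_size", default=1024 * 1024 * 64)
  print(UPLOAD_CHUNK_SIZE.get())  # 67108864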

Example 1: copyfile

# Required import: from hadoop.conf import UPLOAD_CHUNK_SIZE [as alias]
# Or: from hadoop.conf.UPLOAD_CHUNK_SIZE import get [as alias]
# The body below also needs: import errno, stat (and a gettext function bound to _)
  def copyfile(self, src, dst, skip_header=False):
    sb = self._stats(src)
    if sb is None:
      raise IOError(errno.ENOENT, _("Copy src '%s' does not exist") % src)
    if sb.isDir:
      raise IOError(errno.EINVAL, _("Copy src '%s' is a directory") % src)
    if self.isdir(dst):
      raise IOError(errno.EINVAL, _("Copy dst '%s' is a directory") % dst)

    offset = 0

    while True:
      data = self.read(src, offset, UPLOAD_CHUNK_SIZE.get())
      cnt = len(data)

      if offset == 0:
        if skip_header:
          # find() returns -1 when the first chunk holds no newline,
          # avoiding the ValueError that index() would raise here
          n = data.find('\n')
          if n >= 0:
            data = data[n + 1:]
        self.create(dst,
                    overwrite=True,
                    blocksize=sb.blockSize,
                    replication=sb.replication,
                    permission=oct(stat.S_IMODE(sb.mode)),
                    data=data)
      else:
        self.append(dst, data)

      if cnt < UPLOAD_CHUNK_SIZE.get():
        break

      offset += cnt
Developer: mbrukman, Project: cloudera-hue, Lines: 35, Source: webhdfs.py
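A hypothetical call, assuming fs is a client instance exposing the method above; both paths are made up for illustration:

  # Copy an HDFS file, dropping the first (header) line of the source
  fs.copyfile('/user/demo/input.csv', '/user/demo/no_header.csv', skip_header=True)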

Example 2: copyfile

# Required import: from hadoop.conf import UPLOAD_CHUNK_SIZE [as alias]
# Or: from hadoop.conf.UPLOAD_CHUNK_SIZE import get [as alias]
# The body below also needs: import errno, stat
  def copyfile(self, src, dst):
    sb = self._stats(src)
    if sb is None:
      raise IOError(errno.ENOENT, "Copy src '%s' does not exist" % (src,))
    if sb.isDir:
      raise IOError(errno.EINVAL, "Copy src '%s' is a directory" % (src,))
    if self.isdir(dst):
      raise IOError(errno.EINVAL, "Copy dst '%s' is a directory" % (dst,))

    offset = 0

    while True:
      data = self.read(src, offset, UPLOAD_CHUNK_SIZE.get())
      if offset == 0:
        self.create(dst,
                    overwrite=True,
                    blocksize=sb.blockSize,
                    replication=sb.replication,
                    permission=oct(stat.S_IMODE(sb.mode)),
                    data=data)

      cnt = len(data)
      if cnt == 0:
        break

      if offset != 0:
        self.append(dst, data)
      offset += cnt
Developer: DatalakeInc, Project: hortonworks-sandbox, Lines: 30, Source: webhdfs.py

Example 3: __init__

# Required import: from hadoop.conf import UPLOAD_CHUNK_SIZE [as alias]
# Or: from hadoop.conf.UPLOAD_CHUNK_SIZE import get [as alias]
# The body below also needs: from django.core.files.uploadhandler import FileUploadHandler
  def __init__(self, request):
    FileUploadHandler.__init__(self, request)
    self._file = None
    self._starttime = 0
    self._activated = False
    # Need to directly modify FileUploadHandler.chunk_size
    FileUploadHandler.chunk_size = UPLOAD_CHUNK_SIZE.get()
Developer: DatalakeInc, Project: hortonworks-sandbox, Lines: 9, Source: upload.py

Example 4: read_in_chunks

# Required import: from hadoop.conf import UPLOAD_CHUNK_SIZE [as alias]
# Or: from hadoop.conf.UPLOAD_CHUNK_SIZE import get [as alias]
  def read_in_chunks(fs, path, offset=0):
    while True:
      chunk = fs.read(path, offset, UPLOAD_CHUNK_SIZE.get())
      if chunk:
        offset += len(chunk)
        yield chunk
      else:
        return
Developer: OpenPOWER-BigData, Project: HDP-hue, Lines: 10, Source: create_table.py
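A hypothetical way to consume this generator, streaming an HDFS file into a local copy; fs and both paths are assumptions for illustration:

  # Stream an HDFS file to local disk one chunk at a time
  with open('/tmp/local_copy.csv', 'wb') as out:
    for chunk in read_in_chunks(fs, '/user/demo/data.csv'):
      out.write(chunk)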

Example 5: test_upload_file

# Required import: from hadoop.conf import UPLOAD_CHUNK_SIZE [as alias]
# Or: from hadoop.conf.UPLOAD_CHUNK_SIZE import get [as alias]
# The test below also needs: import os, json, tempfile; nose's assert_equal and
# assert_true; and Hue's test helpers make_logged_in_client and grant_access
  def test_upload_file(self):
    with tempfile.NamedTemporaryFile() as local_file:
      # Make sure we can upload larger than the UPLOAD chunk size
      file_size = UPLOAD_CHUNK_SIZE.get() * 2
      local_file.write('0' * file_size)
      local_file.flush()

      prefix = self.cluster.fs_prefix + '/test_upload_file'
      self.cluster.fs.mkdir(prefix)

      USER_NAME = 'test'
      HDFS_DEST_DIR = prefix + "/tmp/fb-upload-test"
      LOCAL_FILE = local_file.name
      HDFS_FILE = HDFS_DEST_DIR + '/' + os.path.basename(LOCAL_FILE)

      self.cluster.fs.do_as_superuser(self.cluster.fs.mkdir, HDFS_DEST_DIR)
      self.cluster.fs.do_as_superuser(self.cluster.fs.chown, HDFS_DEST_DIR, USER_NAME, USER_NAME)
      self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, HDFS_DEST_DIR, 0700)

      stats = self.cluster.fs.stats(HDFS_DEST_DIR)
      assert_equal(stats['user'], USER_NAME)
      assert_equal(stats['group'], USER_NAME)

      # Upload the temporary file through the FileBrowser endpoint
      resp = self.c.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR, # GET param avoids infinite looping
                         dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
      response = json.loads(resp.content)

      assert_equal(0, response['status'], response)
      stats = self.cluster.fs.stats(HDFS_FILE)
      assert_equal(stats['user'], USER_NAME)
      assert_equal(stats['group'], USER_NAME)

      f = self.cluster.fs.open(HDFS_FILE)
      actual = f.read(file_size)
      expected = file(LOCAL_FILE).read()
      assert_equal(actual, expected, 'files do not match: %s != %s' % (len(actual), len(expected)))

      # Upload again; this fails because the file already exists
      resp = self.c.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                         dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
      response = json.loads(resp.content)
      assert_equal(-1, response['status'], response)
      assert_true('already exists' in response['data'], response)

      # Upload as a different user; this fails because of missing permissions
      not_me = make_logged_in_client("not_me", is_superuser=False)
      grant_access("not_me", "not_me", "filebrowser")
      try:
        resp = not_me.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                           dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
        response = json.loads(resp.content)
        assert_equal(-1, response['status'], response)
        assert_true('Permission denied' in response['data'], response)
      except AttributeError:
        # Seems like a Django bug.
        # StopFutureHandlers() does not seem to work in test mode as it continues to MemoryFileUploadHandler after perm issue and so fails.
        pass
Developer: CaeserNieh, Project: hue, Lines: 60, Source: views_test.py

Example 6: __init__

# Required import: from hadoop.conf import UPLOAD_CHUNK_SIZE [as alias]
# Or: from hadoop.conf.UPLOAD_CHUNK_SIZE import get [as alias]
# The body below also needs: from django.core.files.uploadhandler import FileUploadHandler
  def __init__(self, request):
    FileUploadHandler.__init__(self, request)
    self._file = None
    self._starttime = 0
    self._activated = False
    self._destination = request.GET.get('dest', None) # GET param avoids infinite looping
    self.request = request
    # Need to directly modify FileUploadHandler.chunk_size
    FileUploadHandler.chunk_size = UPLOAD_CHUNK_SIZE.get()
Developer: 2013Commons, Project: HUE-SHARK, Lines: 11, Source: upload.py
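For context, a Django FileUploadHandler like the two above is installed per-request before request.FILES is parsed. A minimal sketch, assuming the handler class is named HDFSfileUploadHandler as in Hue's upload.py (treat the class name as an assumption):

  # In a Django view or middleware, before the POST body is parsed:
  request.upload_handlers.insert(0, HDFSfileUploadHandler(request))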


Note: The hadoop.conf.UPLOAD_CHUNK_SIZE.get examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects and remain copyrighted by their original authors; consult each project's license before use or redistribution, and do not repost without permission.