

Python hdfs.hdfs Function Code Examples

This article collects typical usage examples of the pydoop.hdfs.hdfs function in Python. If you have been wondering exactly how the hdfs function is used, or what real-world calls to it look like, the hand-picked code examples below should help.


A total of 15 code examples of the hdfs function are shown below, ordered by popularity by default.
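Before the examples, here is a minimal usage sketch pieced together only from calls that appear below; the connection arguments and the /tmp/demo.txt path are illustrative placeholders, not values taken from any of the listed projects.

from pydoop import hdfs

# Explicit connection: "" with port 0 selects the local file system,
# while "default" with port 0 uses the namenode from the Hadoop configuration.
fs = hdfs.hdfs("default", 0)
print(fs.list_directory("/"))
fs.close()

# Module-level convenience wrappers open and close the connection internally.
hdfs.dump("hello", "/tmp/demo.txt")
print(hdfs.load("/tmp/demo.txt"))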

Example 1: write

def write(writeFlag):
    if writeFlag:
        # instantiate hadoop
        hdfs.hdfs()

        targetPath = config.targetPath
        targetDirectory = config.targetDirectory
        sourceFile = config.sourceFile

        print("Target Path: " + targetPath)
        print("Target Directory: " + targetDirectory)
        print("Source Path: " + sourceFile)

        dumpFile = open(sourceFile, "r")
        fullText = dumpFile.read()
        dumpFile.close()

        # write to hadoop
        #hdfs.mkdir(targetDirectory)
        hdfs.dump(fullText, targetPath)
#hdfs.cp(sourceFile, targetPath)

#print (hdfs.ls("test4"))
#files = hdfs.ls("test4")

# read from hadoop
#hdfs.get("test4/hello.txt", "/tmp/hello.txt")
#with open("/tmp/hello.txt") as f:
#	print f.read()

#print(hdfs.ls("test", "hduser1"))
#text = hdfs.load("test/hello.txt")
#print text
Developer: davedwards, Project: beautiful-data, Lines of code: 33, Source file: hadoopWriter.py

Example 2: tearDown

def tearDown(self):
    fs = hdfs.hdfs("", 0)
    fs.delete(self.local_wd)
    fs.close()
    fs = hdfs.hdfs("default", 0)
    fs.delete(self.hdfs_wd)
    fs.close()
Developer: ZEMUSHKA, Project: pydoop, Lines of code: 7, Source file: test_hdfs.py

Example 3: capacity

def capacity(self):
    fs = hdfs.hdfs("", 0)
    self.assertRaises(RuntimeError, fs.capacity)
    fs.close()
    if not hdfs.default_is_local():
        fs = hdfs.hdfs("default", 0)
        cap = fs.capacity()
        self.assertGreaterEqual(cap, 0)
Developer: kikkomep, Project: pydoop, Lines of code: 8, Source file: test_hdfs.py

Example 4: cache

def cache(self):
    orig_fs = hdfs.hdfs(*self.hp_cases[0])
    for host, port in self.hp_cases[1:]:
        fs = hdfs.hdfs(host, port)
        self.assertTrue(fs.fs is orig_fs.fs)
        fs.close()
        self.assertFalse(orig_fs.closed)
    orig_fs.close()
    self.assertTrue(orig_fs.closed)
Developer: jkahn, Project: pydoop-code, Lines of code: 9, Source file: test_hdfs_fs.py

Example 5: cache

def cache(self):
    for (h1, p1), (h2, p2) in product(self.hp_cases, repeat=2):
        hdfs.hdfs._CACHE.clear()
        hdfs.hdfs._ALIASES = {"host": {}, "port": {}, "user": {}}  # FIXME
        with hdfs.hdfs(h1, p1) as fs1:
            with hdfs.hdfs(h2, p2) as fs2:
                print ' * %r vs %r' % ((h1, p1), (h2, p2))
                self.assertTrue(fs2.fs is fs1.fs)
            for fs in fs1, fs2:
                self.assertFalse(fs.closed)
        for fs in fs1, fs2:
            self.assertTrue(fs.closed)
Developer: CynthiaYiqingHuang, Project: pydoop, Lines of code: 12, Source file: test_hdfs_fs.py

Example 6: _hdfs_filesystem

def _hdfs_filesystem():
    """Retrieve references to the local and HDFS file system.

    Need to be able to specify host/port. For now, works off defaults.
    """
    fs = hdfs("default", 0)
    lfs = hdfs("", 0)
    try:
        yield fs, lfs
    finally:
        fs.close()
        lfs.close()
Developer: CosteaPaul, Project: bcbb, Lines of code: 12, Source file: hadoop_run.py
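The yield inside a try/finally suggests this helper is intended to be used as a context manager; the sketch below assumes the original module applies contextlib.contextmanager to it (an assumption, the decorator is not shown in the snippet above), and the hdfs_filesystems name is hypothetical.

from contextlib import contextmanager

# Hypothetical wrapper around the generator above; in the source project the
# decorator is presumably applied directly to _hdfs_filesystem.
hdfs_filesystems = contextmanager(_hdfs_filesystem)

with hdfs_filesystems() as (fs, lfs):
    print(fs.list_directory("/"))  # fs: HDFS handle, lfs: local file system handle
# both handles are closed automatically when the with-block exits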

Example 7: read

def read(readFlag):
    print(readFlag)
    if readFlag:
        targetFile = config.targetFile.strip()
        targetDirectory = config.targetDirectory.strip()
        targetPath = config.targetPath

        print(targetPath)

        # instantiate hadoop
        hdfs.hdfs()

        # read from hadoop
        fileToRead = hdfs.open(targetPath)
        print(fileToRead.read())
Developer: davedwards, Project: beautiful-data, Lines of code: 15, Source file: hadoopReader.py

Example 8: __warn_user_if_wd_maybe_unreadable

  def __warn_user_if_wd_maybe_unreadable(self, abs_remote_path):
    """
    Check directories above the remote module and issue a warning if
    they are not traversable by all users.

    The reasoning behind this is mainly aimed at set-ups with a centralized
    Hadoop cluster, accessed by all users, and where the Hadoop task tracker
    user is not a superuser; an example may be if you're running a shared
    Hadoop without HDFS (using only a POSIX shared file system).  The task
    tracker correctly changes user to the job requester's user for most
    operations, but not when initializing the distributed cache, so jobs that
    want to place files not accessible by the Hadoop user into the distributed
    cache will fail.
    """
    host, port, path = hdfs.path.split(abs_remote_path)
    if host == '' and port == 0: # local file system
      host_port = "file:///"
    else:
      # FIXME: this won't work with any scheme other than hdfs:// (e.g., s3)
      host_port = "hdfs://%s:%s/" % (host, port)
    path_pieces = path.strip('/').split(os.path.sep)
    fs = hdfs.hdfs(host, port)
    for i in xrange(0, len(path_pieces)):
      part = os.path.join(host_port, os.path.sep.join(path_pieces[0:i+1]))
      permissions = fs.get_path_info(part)['permissions']
      if permissions & 0111 != 0111:
        self.logger.warning(
          "the remote module %s may not be readable\n" +
          "by the task tracker when initializing the distributed cache.\n" +
          "Permissions on path %s: %s", abs_remote_path, part, oct(permissions))
        break
Developer: ilveroluca, Project: pydoop, Lines of code: 30, Source file: script.py

Example 9: setUp

def setUp(self):
    if hdfs.default_is_local():
        self.root = "file:"
    else:
        fs = hdfs.hdfs("default", 0)
        self.root = "hdfs://%s:%s" % (fs.host, fs.port)
        fs.close()
Developer: ilveroluca, Project: pydoop, Lines of code: 7, Source file: test_path.py

Example 10: connect

def connect(self):
    for host, port in self.hp_cases:
        for user in self.u_cases:
            expected_user = user or CURRENT_USER
            fs = hdfs.hdfs(host, port, user=user)
            self.assertEqual(fs.user, expected_user)
            fs.close()
Developer: kmatzen, Project: pydoop, Lines of code: 7, Source file: test_hdfs_fs.py

Example 11: run

    def run(self):
        if self.options is None:
            raise RuntimeError("You must call parse_cmd_line before run")

        if self.logger.isEnabledFor(logging.DEBUG):
            self.logger.debug("Running Seqal")
            self.logger.debug("Properties:\n%s", "\n".join( sorted([ "%s = %s" % (str(k), str(v)) for k,v in self.properties.iteritems() ]) ))
        self.logger.info("Input: %s; Output: %s; reference: %s", self.options.input, self.options.output, self.options.reference)

        try:
            self.hdfs = phdfs.hdfs('default', 0)
            self.__validate()

            self.remote_bin_name = tempfile.mktemp(prefix='seqal_bin.', suffix=str(random.random()), dir='')
            try:
                with self.hdfs.open_file(self.remote_bin_name, 'w') as script:
                    self.__write_pipes_script(script)

                full_name = self.hdfs.get_path_info(self.remote_bin_name)['name']

                return seal_utilities.run_pipes(full_name, self.options.input, self.options.output,
                    properties=self.properties, args_list=self.left_over_args)
            finally:
                try:
                    self.hdfs.delete(self.remote_bin_name) # delete the temporary pipes script from HDFS
                    self.logger.debug("pipes script %s deleted", self.remote_bin_name)
                except:
                    self.logger.error("Error deleting the temporary pipes script %s from HDFS", self.remote_bin_name)
                    ## don't re-raise the exception.  We're on our way out
        finally:
            if self.hdfs:
                tmp = self.hdfs
                self.hdfs = None
                tmp.close()
                self.logger.debug("HDFS closed")
Developer: pinno, Project: seal, Lines of code: 35, Source file: seqal_run.py

Example 12: stat_on_local

def stat_on_local(self):
    wd_ = tempfile.mkdtemp(prefix='pydoop_', suffix=UNI_CHR)
    p_ = os.path.join(wd_, make_random_str())
    if hdfs.default_is_local():
        wd, p = wd_, p_
        host = "default"
    else:
        wd, p = ('file:%s' % _ for _ in (wd_, p_))
        host = ""
    fs = hdfs.hdfs(host, 0)
    with fs.open_file(p_, 'w') as fo:
        fo.write(make_random_str())
    info = fs.get_path_info(p_)
    fs.close()
    s = hdfs.path.stat(p)
    os_s = os.stat(p_)
    for n in dir(s):
        if n.startswith('st_'):
            try:
                exp_v = getattr(os_s, n)
            except AttributeError:
                try:
                    exp_v = info[self.NMAP[n]]
                except KeyError:
                    continue
            # compare the hdfs.path.stat result against os.stat / path info
            self.assertEqual(getattr(s, n), exp_v)
    self.__check_extra_args(s, info)
    self.__check_wrapper_funcs(p)
    hdfs.rmr(wd)
Developer: kikkomep, Project: pydoop, Lines of code: 29, Source file: test_path.py

Example 13: get_res

def get_res(output_dir):
    fs = hdfs()
    data = []
    for x in fs.list_directory(output_dir):
        if os.path.split(x['path'])[-1].startswith('part-'):
            with fs.open_file(x['path']) as f:
                data.append(f.read())
    all_data = ''.join(data)
    return pts.parse_mr_output(all_data, vtype=int)
Developer: kikkomep, Project: pydoop, Lines of code: 9, Source file: check_results.py

Example 14: __init__

def __init__(self, context):
    super(FastaReader, self).__init__()
    self.logger = logging.getLogger(self.__class__.__name__)
    self.isplit = InputSplit(context.getInputSplit())
    self.host, self.port, self.fpath = split_hdfs_path(self.isplit.filename)
    self.fs = hdfs(self.host, self.port)
    self.file = self.fs.open_file(self.fpath, os.O_RDONLY)
    self._iterator = (SeqIO.parse(self.file, "fasta") if
                      self.isplit.offset == 0 else None)
Developer: 16NWallace, Project: bcbb, Lines of code: 9, Source file: distblast_pipes.py

Example 15: compute_vc

def compute_vc(input_dir):
    fs = hdfs()
    data = []
    for x in fs.list_directory(input_dir):
        with fs.open_file(x['path']) as f:
            data.append(f.read())
    all_data = ''.join(data)
    vowels = re.findall('[AEIOUY]', all_data.upper())
    return Counter(vowels)
Developer: kikkomep, Project: pydoop, Lines of code: 9, Source file: check_results.py


Note: The pydoop.hdfs.hdfs examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; the source code copyright belongs to the original authors, and redistribution or use should follow the license of the corresponding project. Please do not republish without permission.