Python patch.object Function Code Examples

This article collects typical usage examples of the tests.py2.patch.object function in Python. If you have been wondering how exactly patch.object is used, how it works, or what real-world code that uses it looks like, the hand-picked examples below should help.


Below are 15 code examples of patch.object, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
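For context: tests.py2 is mrjob's Python 2/3 compatibility shim, and the patch it exposes is mock.patch (the unittest.mock backport) on Python 2 or unittest.mock.patch on Python 3. patch.object temporarily replaces a named attribute on a target object with a MagicMock, either for the duration of a with block or until the patcher is stopped. A minimal sketch of the idiom (the Greeter class is hypothetical, invented purely for illustration):

from unittest.mock import patch  # Python 2 code would use: from mock import patch

class Greeter(object):
    def greet(self):
        return 'hello'

g = Greeter()

# Inside the block, g.greet is replaced by a MagicMock that records calls.
with patch.object(g, 'greet', return_value='mocked') as mock_greet:
    assert g.greet() == 'mocked'
    mock_greet.assert_called_once_with()

# On exit, patch.object restores the original attribute automatically.
assert g.greet() == 'hello'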

Example 1: setUp

    def setUp(self):
        super(StreamingArgsTestCase, self).setUp()
        self.runner = HadoopJobRunner(
            hadoop_bin='hadoop', hadoop_streaming_jar='streaming.jar',
            mr_job_script='my_job.py', stdin=BytesIO())
        self.runner._add_job_files_for_upload()

        self.runner._hadoop_version = '0.20.204'
        self.start(patch.object(self.runner, '_upload_args',
                                return_value=['new_upload_args']))
        self.start(patch.object(self.runner, '_pre_0_20_upload_args',
                                return_value=['old_upload_args']))
        self.start(patch.object(self.runner, '_hadoop_args_for_step',
                                return_value=['hadoop_args_for_step']))
        self.start(patch.object(self.runner, '_hdfs_step_input_files',
                                return_value=['hdfs_step_input_files']))
        self.start(patch.object(self.runner, '_hdfs_step_output_dir',
                                return_value='hdfs_step_output_dir'))
        self.runner._script_path = 'my_job.py'

        self._new_basic_args = [
            'hadoop', 'jar', 'streaming.jar',
            'new_upload_args', 'hadoop_args_for_step',
            '-input', 'hdfs_step_input_files',
            '-output', 'hdfs_step_output_dir']

        self._old_basic_args = [
            'hadoop', 'jar', 'streaming.jar',
            'hadoop_args_for_step',
            '-input', 'hdfs_step_input_files',
            '-output', 'hdfs_step_output_dir',
            'old_upload_args']
Developer: nilesh-molankar | Project: mrjob | Lines of code: 32 | Source file: test_hadoop.py
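A note on the self.start(...) calls above: mrjob's test sandbox provides a small helper that starts a patcher and registers its stop() as a test cleanup, so each patch stays active for the whole test and is undone automatically afterwards. A sketch of that pattern inside a unittest.TestCase subclass (the exact helper in mrjob's sandbox may differ):

    def start(self, patcher):
        # Undo the patch when the test finishes, even if it fails...
        self.addCleanup(patcher.stop)
        # ...and activate it now, returning the mock for convenience.
        return patcher.start()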

Example 2: test_find_hadoop_streaming_jar

    def test_find_hadoop_streaming_jar(self):
        # not just any jar will do
        with patch.object(os, 'walk', return_value=[
                ('/some_dir', None, ['mason.jar'])]):
            self.assertEqual(find_hadoop_streaming_jar('/some_dir'), None)

        # should match a real streaming jar (note that os.walk yields
        # *lists* of filenames, so the mocked return values use lists)
        with patch.object(os, 'walk', return_value=[
                ('/some_dir', None, ['hadoop-0.20.2-streaming.jar'])]):
            self.assertEqual(find_hadoop_streaming_jar('/some_dir'),
                             '/some_dir/hadoop-0.20.2-streaming.jar')

        # shouldn't find anything in an empty dir
        with patch.object(os, 'walk', return_value=[]):
            self.assertEqual(find_hadoop_streaming_jar('/some_dir'), None)
Developer: nilesh-molankar | Project: mrjob | Lines of code: 14 | Source file: test_hadoop.py

Example 3: test_verbose

    def test_verbose(self):
        with patch.object(sys, 'stderr', StringIO()) as stderr:
            MRJob.set_up_logging(verbose=True)
            log = logging.getLogger('__main__')
            log.info('INFO')
            log.debug('DEBUG')
            self.assertEqual(stderr.getvalue(), 'INFO\nDEBUG\n')
Developer: Yelp | Project: mrjob | Lines of code: 7 | Source file: test_launch.py

Example 4: mrjob_conf_patcher

def mrjob_conf_patcher(substitute_conf=EMPTY_MRJOB_CONF):
    def mock_load_opts_from_mrjob_confs(runner_alias, conf_paths=None):
        return [(None,
                 substitute_conf.get('runners', {}).get(runner_alias, {}))]

    return patch.object(runner, 'load_opts_from_mrjob_confs',
                        mock_load_opts_from_mrjob_confs)
Developer: Yelp | Project: mrjob | Lines of code: 7 | Source file: sandbox.py
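Since patch.object returns a patcher object, the result of mrjob_conf_patcher can be used as a context manager or started and stopped explicitly. A hypothetical usage sketch (the substitute config shown is invented for illustration):

SUBSTITUTE_CONF = {'runners': {'inline': {'jobconf': {'foo': 'bar'}}}}

with mrjob_conf_patcher(SUBSTITUTE_CONF):
    # any runner constructed here loads its opts from SUBSTITUTE_CONF
    # instead of the user's real mrjob.conf
    pass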

Example 5: test_kill_persistent_cluster

    def test_kill_persistent_cluster(self):
        with no_handlers_for_logger("mrjob.dataproc"):
            r = self._quick_runner()
            with patch.object(mrjob.dataproc.DataprocJobRunner,
                              "_api_cluster_delete") as m:
                r._opts["cluster_id"] = "j-MOCKCLUSTER0"
                r._cleanup_cluster()
                self.assertTrue(m.called)
Developer: davidmarin | Project: mrjob | Lines of code: 7 | Source file: test_dataproc.py

Example 6: test_path_join

    def test_path_join(self):
        fs = Filesystem()

        with patch.object(fs, 'join'):
            with no_handlers_for_logger('mrjob.fs.base'):
                fs.path_join('foo', 'bar')

            fs.join.assert_called_once_with('foo', 'bar')
Developer: kartheek6 | Project: mrjob | Lines of code: 8 | Source file: test_base.py

Example 7: test_path_exists

    def test_path_exists(self):
        fs = Filesystem()

        with patch.object(fs, "exists"):
            with no_handlers_for_logger("mrjob.fs.base"):
                fs.path_exists("foo")

            fs.exists.assert_called_once_with("foo")
Developer: sebratt | Project: mrjob | Lines of code: 8 | Source file: test_base.py
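Examples 6 and 7 test the same delegation pattern: patching the new-style method on a Filesystem instance swaps it for a MagicMock, so the test can assert that the deprecated alias forwards its arguments without touching a real filesystem. The code under test presumably looks something like this sketch (the actual implementation in mrjob.fs.base may differ):

    def path_exists(self, path_glob):
        # deprecated alias: warn, then delegate to the new method
        log.warning('path_exists() is deprecated; use exists() instead')
        return self.exists(path_glob)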

Example 8: test_default_options

    def test_default_options(self):
        with no_handlers_for_logger('__main__'):
            with patch.object(sys, 'stderr', StringIO()) as stderr:
                MRJob.set_up_logging()
                log = logging.getLogger('__main__')
                log.info('INFO')
                log.debug('DEBUG')
                self.assertEqual(stderr.getvalue(), 'INFO\n')
Developer: etiennebatise | Project: mrjob | Lines of code: 8 | Source file: test_launch.py

Example 9: test_put_part_size_mb

    def test_put_part_size_mb(self):
        local_path = self.makefile('foo', contents=b'bar')
        dest = 'gs://bar-files/foo'
        self.storage_client().bucket('bar-files').create()

        with patch.object(GCSFilesystem, '_blob') as blob_meth:
            self.fs.put(local_path, dest, part_size_mb=99999)
            blob_meth.assert_called_once_with(dest, chunk_size=99999)
Developer: Affirm | Project: mrjob | Lines of code: 8 | Source file: test_gcs.py

Example 10: test_libjars_attr_relative_path

    def test_libjars_attr_relative_path(self):
        job_dir = os.path.dirname(MRJob.mr_job_script())

        with patch.object(MRJob, "LIBJARS", ["cookie.jar", "/left/dora.jar"]):
            job = MRJob()

            self.assertEqual(
                job.job_runner_kwargs()["libjars"], [os.path.join(job_dir, "cookie.jar"), "/left/dora.jar"]
            )
Developer: davidmarin | Project: mrjob | Lines of code: 9 | Source file: test_job.py

Example 11: setUp

    def setUp(self):
        super(StreamingArgsTestCase, self).setUp()
        self.runner = HadoopJobRunner(
            hadoop_bin='hadoop', hadoop_streaming_jar='<streaming jar>',
            mr_job_script='my_job.py', stdin=BytesIO())
        self.runner._add_job_files_for_upload()

        self.start(patch.object(self.runner, '_upload_args',
                                return_value=['<upload args>']))
        self.start(patch.object(self.runner, '_hadoop_args_for_step',
                                return_value=['<hadoop args for step>']))
        self.start(patch.object(self.runner, '_hdfs_step_input_files',
                                return_value=['<hdfs step input files>']))
        self.start(patch.object(self.runner, '_hdfs_step_output_dir',
                                return_value='<hdfs step output dir>'))
        self.start(patch.object(HadoopFilesystem, 'get_hadoop_version',
                                return_value='2.7.1'))
        self.runner._script_path = 'my_job.py'
Developer: Milkigit | Project: mrjob | Lines of code: 18 | Source file: test_hadoop.py

Example 12: test_libjars_attr_relative_path

    def test_libjars_attr_relative_path(self):
        job_dir = os.path.dirname(MRJob.mr_job_script())

        with patch.object(MRJob, 'LIBJARS', ['cookie.jar', '/left/dora.jar']):
            job = MRJob()

            self.assertEqual(
                job._runner_kwargs()['libjars'],
                [os.path.join(job_dir, 'cookie.jar'), '/left/dora.jar'])
Developer: okomestudio | Project: mrjob | Lines of code: 9 | Source file: test_job.py

Example 13: test_kill_cluster_if_successful

    def test_kill_cluster_if_successful(self):
        # If cleanup is configured to kill the cluster, mrjob should do so
        # regardless of whether the job succeeded.
        with no_handlers_for_logger('mrjob.dataproc'):
            r = self._quick_runner()
            with patch.object(mrjob.dataproc.DataprocJobRunner,
                              '_api_cluster_delete') as m:
                r._ran_job = True
                r._cleanup_cluster()
                self.assertTrue(m.called)
Developer: Jeremyfanfan | Project: mrjob | Lines of code: 9 | Source file: test_dataproc.py

Example 14: test_configuration_translation

    def test_configuration_translation(self):
        job = MRWordCount(
            ["--jobconf", "mapred.jobtracker.maxtasks.per.job=1"])

        with job.make_runner() as runner:
            with no_handlers_for_logger("mrjob.runner"):
                with patch.object(runner, "get_hadoop_version",
                                  return_value="2.7.1"):
                    self.assertEqual(
                        runner._hadoop_args_for_step(0),
                        ["-D", "mapred.jobtracker.maxtasks.per.job=1",
                         "-D", "mapreduce.jobtracker.maxtasks.perjob=1"])
Developer: irskep | Project: mrjob | Lines of code: 10 | Source file: test_runner.py

Example 15: test_no_mrjob_confs

    def test_no_mrjob_confs(self):
        with patch.object(conf, 'real_mrjob_conf_path', return_value=None):
            mr_job = MRIncrementerJob(['-r', 'inline', '--times', '2'])
            mr_job.sandbox(stdin=BytesIO(b'0\n1\n2\n'))

            with mr_job.make_runner() as runner:
                runner.run()
                output = sorted(mr_job.parse_output_line(line)[1]
                                for line in runner.stream_output())
                self.assertEqual(output, [2, 3, 4])
Developer: ashleymiller | Project: mrjob | Lines of code: 10 | Source file: test_inline.py


Note: The tests.py2.patch.object examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.