This article collects typical usage examples of the Python class mrjob.local.LocalMRJobRunner. If you are unsure what LocalMRJobRunner does or how to use it, the hand-picked class examples below should help. A total of 15 code examples of the LocalMRJobRunner class are shown, sorted by popularity by default.
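Before the tests themselves, here is a minimal sketch of how a LocalMRJobRunner usually comes into being in practice: rather than instantiating it directly, you run an MRJob with -r local and let make_runner() build the runner. MRWordCount and input.txt below are hypothetical placeholders, not part of the examples that follow.

# Minimal sketch (assumptions: MRWordCount is a hypothetical MRJob
# subclass, input.txt a hypothetical input file)
from my_word_count import MRWordCount  # hypothetical module

job = MRWordCount(args=['-r', 'local', 'input.txt'])
with job.make_runner() as runner:  # with -r local this is a LocalMRJobRunner
    runner.run()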
Example 1: test_hadoop_output_format
def test_hadoop_output_format(self):
    format = "org.apache.hadoop.mapred.SequenceFileOutputFormat"
    runner = LocalMRJobRunner(conf_paths=[], hadoop_output_format=format)
    self.assertEqual(runner._hadoop_conf_args({}, 0, 1),
                     ["-outputformat", format])
    # test multi-step job: -outputformat should only apply to the last step
    self.assertEqual(runner._hadoop_conf_args({}, 0, 2), [])
    self.assertEqual(runner._hadoop_conf_args({}, 1, 2),
                     ["-outputformat", format])
Example 2: test_empty_jobconf_values
def test_empty_jobconf_values(self):
    # a value of None means to omit that jobconf
    jobconf = {'foo': '', 'bar': None}
    runner = LocalMRJobRunner(conf_paths=[], jobconf=jobconf)
    self.assertEqual(runner._hadoop_conf_args({}, 0, 1),
                     ['-D', 'foo='])
Example 3: test_get_file_splits_test
def test_get_file_splits_test(self):
    # set up input paths (written in binary mode to match the reads below)
    input_path = os.path.join(self.tmp_dir, "input")
    with open(input_path, "wb") as input_file:
        input_file.write(b"bar\nqux\nfoo\nbar\nqux\nfoo\n")
    input_path2 = os.path.join(self.tmp_dir, "input2")
    with open(input_path2, "wb") as input_file:
        input_file.write(b"foo\nbar\nbar\n")
    runner = LocalMRJobRunner(conf_paths=[])
    # split into 3 files
    file_splits = runner._get_file_splits([input_path, input_path2], 3)
    # make sure we get 3 files
    self.assertEqual(len(file_splits), 3)
    # make sure all the data is preserved
    content = []
    for file_name in file_splits:
        with open(file_name, "rb") as f:
            content.extend(f.readlines())
    self.assertEqual(
        sorted(content),
        [b"bar\n", b"bar\n", b"bar\n", b"bar\n", b"foo\n",
         b"foo\n", b"foo\n", b"qux\n", b"qux\n"])
Example 4: test_jobconf_from_step
def test_jobconf_from_step(self):
    jobconf = {"FOO": "bar", "BAZ": "qux"}
    # Hack in steps rather than creating a new MRJob subclass;
    # the step-level jobconf should override the runner-level BAZ
    runner = LocalMRJobRunner(jobconf=jobconf)
    runner._steps = [{"jobconf": {"BAZ": "quux", "BAX": "Arnold"}}]
    self.assertEqual(runner._hadoop_args_for_step(0),
                     ["-D", "BAX=Arnold", "-D", "BAZ=quux", "-D", "FOO=bar"])
Example 5: test_owner_and_label_kwargs
def test_owner_and_label_kwargs(self):
    runner = LocalMRJobRunner(conf_path=False,
                              owner='ads', label='ads_chain')
    match = JOB_NAME_RE.match(runner.get_job_name())
    assert_equal(match.group(1), 'ads_chain')
    assert_equal(match.group(2), 'ads')
Example 6: test_get_file_splits_sorted_test
def test_get_file_splits_sorted_test(self):
    # set up input paths
    input_path = os.path.join(self.tmp_dir, "input")
    with open(input_path, "wb") as input_file:
        input_file.write(
            b"1\tbar\n1\tbar\n1\tbar\n2\tfoo\n2\tfoo\n2\tfoo\n"
            b"3\tqux\n3\tqux\n3\tqux\n")
    runner = LocalMRJobRunner(conf_paths=[])
    file_splits = runner._get_file_splits([input_path], 3, keep_sorted=True)
    # make sure we get 3 files
    self.assertEqual(len(file_splits), 3)
    # make sure all the data is preserved in sorted order
    content = []
    for file_name in sorted(file_splits.keys()):
        with open(file_name, "rb") as f:
            content.extend(f.readlines())
    self.assertEqual(
        content,
        [b"1\tbar\n", b"1\tbar\n", b"1\tbar\n",
         b"2\tfoo\n", b"2\tfoo\n", b"2\tfoo\n",
         b"3\tqux\n", b"3\tqux\n", b"3\tqux\n"])
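Examples 3 and 6 iterate over file_splits directly and over sorted(file_splits.keys()), which suggests _get_file_splits returns a dict keyed by the paths of the split files it wrote. A hedged sketch for inspecting that mapping, reusing input_path from Example 6; the structure of the dict's values is an implementation detail this sketch deliberately ignores.

# Hedged sketch: peeking at the dict returned by _get_file_splits.
# Assumption (inferred from the tests above): keys are split file paths.
runner = LocalMRJobRunner(conf_paths=[])
file_splits = runner._get_file_splits([input_path], 3)
for split_path in sorted(file_splits):
    with open(split_path, 'rb') as f:
        print(split_path, f.read())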
Example 7: test_empty_no_user
def test_empty_no_user(self):
    self.getuser_should_fail = True
    runner = LocalMRJobRunner(conf_path=False)
    match = JOB_NAME_RE.match(runner.get_job_name())
    assert_equal(match.group(1), 'no_script')
    assert_equal(match.group(2), 'no_user')
Example 8: test_auto_owner
def test_auto_owner(self):
    os.environ['USER'] = 'mcp'
    runner = LocalMRJobRunner(conf_path=False)
    match = JOB_NAME_RE.match(runner.get_job_name())
    assert_equal(match.group(1), 'no_script')
    assert_equal(match.group(2), 'mcp')
Example 9: test_get_file_splits_test
def test_get_file_splits_test(self):
    # set up input paths
    input_path = os.path.join(self.tmp_dir, 'input')
    with open(input_path, 'w') as input_file:
        input_file.write('bar\nqux\nfoo\nbar\nqux\nfoo\n')
    input_path2 = os.path.join(self.tmp_dir, 'input2')
    with open(input_path2, 'w') as input_file:
        input_file.write('foo\nbar\nbar\n')
    runner = LocalMRJobRunner(conf_paths=[])
    # split into 3 files
    file_splits = runner._get_file_splits([input_path, input_path2], 3)
    # make sure we get 3 files
    self.assertEqual(len(file_splits), 3)
    # make sure all the data is preserved
    content = []
    for file_name in file_splits:
        with open(file_name) as f:
            content.extend(f.readlines())
    self.assertEqual(sorted(content),
                     ['bar\n', 'bar\n', 'bar\n', 'bar\n', 'foo\n',
                      'foo\n', 'foo\n', 'qux\n', 'qux\n'])
Example 10: test_get_file_splits_sorted_test
def test_get_file_splits_sorted_test(self):
    # set up input paths
    input_path = os.path.join(self.tmp_dir, 'input')
    with open(input_path, 'w') as input_file:
        input_file.write(
            '1\tbar\n1\tbar\n1\tbar\n2\tfoo\n2\tfoo\n2\tfoo\n3\tqux\n'
            '3\tqux\n3\tqux\n')
    runner = LocalMRJobRunner(conf_paths=[])
    file_splits = runner._get_file_splits([input_path], 3,
                                          keep_sorted=True)
    # make sure we get 3 files
    self.assertEqual(len(file_splits), 3)
    # make sure all the data is preserved in sorted order
    content = []
    for file_name in sorted(file_splits.keys()):
        with open(file_name, 'r') as f:
            content.extend(f.readlines())
    self.assertEqual(content,
                     ['1\tbar\n', '1\tbar\n', '1\tbar\n',
                      '2\tfoo\n', '2\tfoo\n', '2\tfoo\n',
                      '3\tqux\n', '3\tqux\n', '3\tqux\n'])
Example 11: test_stream_output
def test_stream_output(self):
    a_dir_path = os.path.join(self.tmp_dir, 'a')
    b_dir_path = os.path.join(self.tmp_dir, 'b')
    l_dir_path = os.path.join(self.tmp_dir, '_logs')
    os.mkdir(a_dir_path)
    os.mkdir(b_dir_path)
    os.mkdir(l_dir_path)
    a_file_path = os.path.join(a_dir_path, 'part-00000')
    b_file_path = os.path.join(b_dir_path, 'part-00001')
    c_file_path = os.path.join(self.tmp_dir, 'part-00002')
    x_file_path = os.path.join(l_dir_path, 'log.xml')
    y_file_path = os.path.join(self.tmp_dir, '_SUCCESS')
    with open(a_file_path, 'w') as f:
        f.write('A')
    with open(b_file_path, 'w') as f:
        f.write('B')
    with open(c_file_path, 'w') as f:
        f.write('C')
    with open(x_file_path, 'w') as f:
        f.write('<XML XML XML/>')
    with open(y_file_path, 'w') as f:
        f.write('I win')
    runner = LocalMRJobRunner()
    runner._output_dir = self.tmp_dir
    # _logs and _SUCCESS should be skipped; only part-* files are streamed
    assert_equal(sorted(runner.stream_output()),
                 ['A', 'B', 'C'])
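Outside of a test, stream_output() was typically consumed right after run() in the older mrjob API this example targets. A hedged sketch, reusing the hypothetical job object from the introduction:

# Hedged sketch (older mrjob API): reading job output after a local run.
with job.make_runner() as runner:
    runner.run()
    for line in runner.stream_output():
        key, value = job.parse_output_line(line)
        print(key, value)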
Example 12: _test_spark_executor_memory
def _test_spark_executor_memory(self, conf_value, megs):
    # spark.executor.memory should be folded into the local-cluster
    # master string as a per-executor memory limit in MB
    runner = LocalMRJobRunner(
        jobconf={'spark.executor.memory': conf_value})
    self.assertEqual(runner._spark_master(),
                     'local-cluster[%d,1,%d]' % (cpu_count(), megs))
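This helper is presumably invoked from concrete test methods; a hedged sketch of what those might look like, assuming mrjob normalizes Spark memory suffixes the way Spark itself does ('512m' → 512 MB, '2g' → 2048 MB):

def test_spark_executor_memory_in_megs(self):
    self._test_spark_executor_memory('512m', 512)

def test_spark_executor_memory_in_gigs(self):
    # assumption: '2g' is normalized to 2048 MB
    self._test_spark_executor_memory('2g', 2048)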
Example 13: test_partitioner
def test_partitioner(self):
    partitioner = 'org.apache.hadoop.mapreduce.Partitioner'
    runner = LocalMRJobRunner(conf_paths=[], partitioner=partitioner)
    self.assertEqual(runner._hadoop_conf_args({}, 0, 1),
                     ['-D', 'mapred.job.name=None > None',
                      '-partitioner', partitioner])
Example 14: test_jobconf
def test_jobconf(self):
    jobconf = {"FOO": "bar", "BAZ": "qux", "BAX": "Arnold"}
    runner = LocalMRJobRunner(conf_paths=[], jobconf=jobconf)
    self.assertEqual(runner._hadoop_conf_args({}, 0, 1),
                     ["-D", "BAX=Arnold", "-D", "BAZ=qux", "-D", "FOO=bar"])
    # Hadoop 0.18 predates -D, so jobconf should fall back to -jobconf
    runner = LocalMRJobRunner(conf_paths=[], jobconf=jobconf,
                              hadoop_version="0.18")
    self.assertEqual(
        runner._hadoop_conf_args({}, 0, 1),
        ["-jobconf", "BAX=Arnold", "-jobconf", "BAZ=qux",
         "-jobconf", "FOO=bar"])
Example 15: test_cmdenv
def test_cmdenv(self):
    cmdenv = {'FOO': 'bar', 'BAZ': 'qux', 'BAX': 'Arnold'}
    runner = LocalMRJobRunner(conf_paths=[], cmdenv=cmdenv)
    self.assertEqual(runner._hadoop_conf_args(0, 1),
                     ['-cmdenv', 'BAX=Arnold',
                      '-cmdenv', 'BAZ=qux',
                      '-cmdenv', 'FOO=bar'])