本文整理汇总了Python中liboozie.oozie_api_test.OozieServerProvider.wait_until_completion方法的典型用法代码示例。如果您正苦于以下问题:Python OozieServerProvider.wait_until_completion方法的具体用法?Python OozieServerProvider.wait_until_completion怎么用?Python OozieServerProvider.wait_until_completion使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 liboozie.oozie_api_test.OozieServerProvider 的用法示例。
在下文中一共展示了OozieServerProvider.wait_until_completion方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setUp
# 需要导入模块: from liboozie.oozie_api_test import OozieServerProvider [as 别名]
# 或者: from liboozie.oozie_api_test.OozieServerProvider import wait_until_completion [as 别名]
def setUp(self):
    """
    Create a fresh testN user (test1, test2, ... for later cleanup), prepare
    its HDFS home and app permissions, then submit the sleep workflow so each
    test starts with a known completed Oozie job.
    """
    TestJobBrowserWithHadoop.user_count += 1
    self.username = 'test' + str(TestJobBrowserWithHadoop.user_count)
    self.home_dir = '/user/%s' % self.username
    self.cluster.fs.do_as_user(self.username, self.cluster.fs.create_home_dir, self.home_dir)
    self.client = make_logged_in_client(username=self.username, is_superuser=False, groupname='test')
    self.user = User.objects.get(username=self.username)
    # The tests below exercise all three apps, so grant each one up front.
    for app in ('jobsub', 'jobbrowser', 'oozie'):
        grant_access(self.username, 'test', app)
    add_to_group(self.username)
    # Remember the previous fs user so tearDown can restore it.
    self.prev_user = self.cluster.fs.user
    self.cluster.fs.setuser(self.username)
    self.install_examples()
    self.design = self.create_design()
    # Run the sleep example, since it doesn't require user home directory
    submit_payload = {u'form-MAX_NUM_FORMS': [u''],
                      u'form-INITIAL_FORMS': [u'1'],
                      u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                      u'form-0-value': [u'1'],
                      u'form-TOTAL_FORMS': [u'1']}
    submit_url = reverse('oozie:submit_workflow', args=[self.design.id])
    response = self.client.post(submit_url, data=submit_payload, follow=True)
    workflow_id = response.context['oozie_workflow'].id
    # Poll every second, up to 2 minutes, for the workflow to finish.
    OozieServerProvider.wait_until_completion(workflow_id, timeout=120, step=1)
    self.hadoop_job_id = get_hadoop_job_id(self.oozie, workflow_id, 1)
    self.hadoop_job_id_short = views.get_shorter_id(self.hadoop_job_id)
示例2: test_job
# 需要导入模块: from liboozie.oozie_api_test import OozieServerProvider [as 别名]
# 或者: from liboozie.oozie_api_test.OozieServerProvider import wait_until_completion [as 别名]
def test_job(self):
    """
    Test new job views.
    The status of the jobs should be the same as the status reported back by oozie.
    In this case, all jobs should succeed.
    """
    # Run the sleep example, since it doesn't require user home directory
    design_id = self.design.id
    response = self.client.post(reverse('oozie:submit_workflow',
                                args=[design_id]),
                                data={u'form-MAX_NUM_FORMS': [u''],
                                      u'form-INITIAL_FORMS': [u'1'],
                                      u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                                      u'form-0-value': [u'1'],
                                      u'form-TOTAL_FORMS': [u'1']},
                                follow=True)
    oozie_jobid = response.context['oozie_workflow'].id
    # Block until Oozie reports the workflow done (2 minute budget, 1s polls).
    OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
    hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
    hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
    # All jobs page and fetch job ID
    # Taking advantage of the fact new jobs are at the top of the list!
    response = self.client.get('/jobbrowser/jobs/')
    assert_true(hadoop_job_id_short in response.content, response.content)
    # Make sure job succeeded: present under 'completed', absent everywhere else.
    response = self.client.get('/jobbrowser/jobs/?state=completed')
    assert_true(hadoop_job_id_short in response.content)
    response = self.client.get('/jobbrowser/jobs/?state=failed')
    assert_false(hadoop_job_id_short in response.content)
    response = self.client.get('/jobbrowser/jobs/?state=running')
    assert_false(hadoop_job_id_short in response.content)
    response = self.client.get('/jobbrowser/jobs/?state=killed')
    assert_false(hadoop_job_id_short in response.content)
    # Check sharing permissions
    # Login as ourself: the owner sees the job whether sharing is on or off.
    finish = SHARE_JOBS.set_for_testing(True)
    try:
        response = self.client.get('/jobbrowser/jobs/?user=')
        assert_true(hadoop_job_id_short in response.content)
    finally:
        finish()
    finish = SHARE_JOBS.set_for_testing(False)
    try:
        response = self.client.get('/jobbrowser/jobs/?user=')
        assert_true(hadoop_job_id_short in response.content)
    finally:
        finish()
    # Login as someone else: sees the job only while sharing is enabled.
    client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "jobbrowser")
    finish = SHARE_JOBS.set_for_testing(True)
    try:
        response = client_not_me.get('/jobbrowser/jobs/?user=')
        assert_true(hadoop_job_id_short in response.content)
    finally:
        finish()
    finish = SHARE_JOBS.set_for_testing(False)
    try:
        response = client_not_me.get('/jobbrowser/jobs/?user=')
        assert_false(hadoop_job_id_short in response.content)
    finally:
        finish()
    # Single job page
    response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)
    # Check some counters for single job.
    counters = response.context['job'].counters
    counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
    assert_true(counters_file_bytes_written['map'] > 0)
    assert_true(counters_file_bytes_written['reduce'] > 0)
    # We can't just check the complete contents of the python map because the
    # SLOTS_MILLIS_* entries have a variable number of milliseconds from
    # run-to-run.
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_MAPS']['total'], 2L)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_REDUCES']['total'], 1L)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0L)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0L)
    assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
    assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)
    # There should be 4 tasks for this job: cleanup, setup, map, reduce
    # NOTE(review): assert_true(len(...), 4) only checks truthiness — the
    # second argument is the failure message, not an expected value; this was
    # likely meant to be assert_equal. Same for the two checks below.
    response = self.client.get('/jobbrowser/jobs/%s/tasks' % (hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 4)
    # Select by tasktype
    response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 1)
    # Select by taskstate
    response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 4)
    # Select by text
    #......... the remainder of this example is omitted .........
示例3: test_failed_jobs
# 需要导入模块: from liboozie.oozie_api_test import OozieServerProvider [as 别名]
# 或者: from liboozie.oozie_api_test.OozieServerProvider import wait_until_completion [as 别名]
def test_failed_jobs(self):
    """
    Test jobs with genuine failure, not just killed.

    Submits a MapReduce design whose mapper/combiner/reducer classes do not
    exist, then checks that the job browser lists the job as failed (not
    killed), that the map task succeeded (empty input) while the reduce task
    failed, and that filtering tasks by state returns only the failed reduce.
    """
    # Create design that will fail because the script file isn't there
    INPUT_DIR = self.home_dir + '/input'
    OUTPUT_DIR = self.home_dir + '/output'
    try:
        self.cluster.fs.mkdir(self.home_dir + "/jt-test_failed_jobs")
        self.cluster.fs.mkdir(INPUT_DIR)
        self.cluster.fs.rmtree(OUTPUT_DIR)
    except Exception:
        # Best-effort setup: rmtree fails when OUTPUT_DIR does not exist yet.
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit escape.
        pass
    response = self.client.post(reverse('jobsub.views.new_design', kwargs={'node_type': 'mapreduce'}), {
        'name': ['test_failed_jobs-1'],
        'description': ['description test_failed_jobs-1'],
        'args': '',
        'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
        'prepares': '[]',
        'archives': '[]',
        'files': '[]',
        'job_properties': ['[{"name":"mapred.input.dir","value":"%s"},\
{"name":"mapred.output.dir","value":"%s"},\
{"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]' % (INPUT_DIR, OUTPUT_DIR)]
    }, HTTP_X_REQUESTED_WITH='XMLHttpRequest', follow=True)
    # Submit the job
    design_dict = json.loads(response.content)
    design_id = int(design_dict['id'])
    response = self.client.post(reverse('oozie:submit_workflow',
                                args=[design_id]),
                                data={u'form-MAX_NUM_FORMS': [u''],
                                      u'form-INITIAL_FORMS': [u'1'],
                                      u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                                      u'form-0-value': [u'1'],
                                      u'form-TOTAL_FORMS': [u'1']},
                                follow=True)
    oozie_jobid = response.context['oozie_workflow'].id
    # The returned job object was never used; just wait for completion.
    OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
    hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
    hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
    # Select only killed jobs (should be absent)
    # Taking advantage of the fact new jobs are at the top of the list!
    response = self.client.get('/jobbrowser/jobs/?state=killed')
    assert_false(hadoop_job_id_short in response.content)
    # Select only failed jobs (should be present)
    # Map job should succeed. Reduce job should fail.
    response = self.client.get('/jobbrowser/jobs/?state=failed')
    assert_true(hadoop_job_id_short in response.content)
    # The single job view should have the failed task table
    response = self.client.get('/jobbrowser/jobs/%s' % (hadoop_job_id,))
    html = response.content.lower()
    assert_true('failed task' in html)
    # The map task should say success (empty input)
    map_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, map_task_id))
    assert_true('succeed' in response.content)
    assert_true('failed' not in response.content)
    # The reduce task should say failed
    reduce_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, reduce_task_id))
    assert_true('succeed' not in response.content)
    assert_true('failed' in response.content)
    # Selecting by failed state should include only the failed reduce
    # (the map succeeded, so m_000000 must not be listed).
    response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id,))
    assert_true('r_000000' in response.content)
    assert_true('m_000000' not in response.content)
示例4: test_job
# 需要导入模块: from liboozie.oozie_api_test import OozieServerProvider [as 别名]
# 或者: from liboozie.oozie_api_test.OozieServerProvider import wait_until_completion [as 别名]
def test_job(self):
    """
    Test new job views.
    The status of the jobs should be the same as the status reported back by oozie.
    In this case, all jobs should succeed.
    """
    # Clone design: the user starts with no designs, then owns exactly one.
    assert_equal(0, OozieDesign.objects.filter(owner__username=self.username).count())
    self.client.post('/jobsub/clone_design/%d' % self.sleep_design_id)
    assert_equal(1, OozieDesign.objects.filter(owner__username=self.username).count())
    # Run the sleep example, since it doesn't require user home directory
    design_id = OozieDesign.objects.get(owner__username=self.username).id
    response = self.client.post("/jobsub/submit_design/%d" % (design_id,),
                                dict(map_sleep_time=1,
                                     num_maps=1,
                                     num_reduces=1,
                                     reduce_sleep_time=1),
                                follow=True)
    oozie_jobid = response.context['jobid']
    # Block until Oozie reports the workflow done (2 minute budget, 1s polls).
    # NOTE(review): the returned `job` object is never used below.
    job = OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
    hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
    hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
    # All jobs page and fetch job ID
    # Taking advantage of the fact new jobs are at the top of the list!
    response = self.client.get('/jobbrowser/jobs/')
    assert_true(hadoop_job_id_short in response.content)
    # Make sure job succeeded: present under 'completed', absent everywhere else.
    response = self.client.get('/jobbrowser/jobs/?state=completed')
    assert_true(hadoop_job_id_short in response.content)
    response = self.client.get('/jobbrowser/jobs/?state=failed')
    assert_false(hadoop_job_id_short in response.content)
    response = self.client.get('/jobbrowser/jobs/?state=running')
    assert_false(hadoop_job_id_short in response.content)
    response = self.client.get('/jobbrowser/jobs/?state=killed')
    assert_false(hadoop_job_id_short in response.content)
    # Check sharing permissions
    # Login as ourself: the owner sees the job whether sharing is on or off.
    finish = SHARE_JOBS.set_for_testing(True)
    try:
        response = self.client.get('/jobbrowser/jobs/?user=')
        assert_true(hadoop_job_id_short in response.content)
    finally:
        finish()
    finish = SHARE_JOBS.set_for_testing(False)
    try:
        response = self.client.get('/jobbrowser/jobs/?user=')
        assert_true(hadoop_job_id_short in response.content)
    finally:
        finish()
    # Login as someone else: sees the job only while sharing is enabled.
    client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "jobbrowser")
    finish = SHARE_JOBS.set_for_testing(True)
    try:
        response = client_not_me.get('/jobbrowser/jobs/?user=')
        assert_true(hadoop_job_id_short in response.content)
    finally:
        finish()
    finish = SHARE_JOBS.set_for_testing(False)
    try:
        response = client_not_me.get('/jobbrowser/jobs/?user=')
        assert_false(hadoop_job_id_short in response.content)
    finally:
        finish()
    # Single job page
    response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)
    # Check some counters for single job.
    counters = response.context['job'].counters
    counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
    assert_true(counters_file_bytes_written['map'] > 0)
    assert_true(counters_file_bytes_written['reduce'] > 0)
    # We can't just check the complete contents of the python map because the
    # SLOTS_MILLIS_* entries have a variable number of milliseconds from
    # run-to-run.
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_MAPS']['total'], 1)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_REDUCES']['total'], 1)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0)
    assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
    assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)
    # There should be 4 tasks for this job: cleanup, setup, map, reduce
    # NOTE(review): assert_true(len(...), 4) only checks truthiness — the
    # second argument is the failure message, not an expected value; this was
    # likely meant to be assert_equal. Same for the check below.
    response = self.client.get('/jobbrowser/jobs/%s/tasks' % (hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 4)
    # Select by tasktype
    response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 1)
    # Select by taskstate
    #......... the remainder of this example is omitted .........
示例5: test_failed_jobs
# 需要导入模块: from liboozie.oozie_api_test import OozieServerProvider [as 别名]
# 或者: from liboozie.oozie_api_test.OozieServerProvider import wait_until_completion [as 别名]
def test_failed_jobs(self):
    """
    Test jobs with genuine failure, not just killed.

    Submits a MapReduce design whose mapper/combiner/reducer classes do not
    exist, then verifies the failed/killed filtering and per-task states in
    the job browser.
    """
    # Create design that will fail because the script file isn't there
    INPUT_DIR = self.home_dir + '/input'
    OUTPUT_DIR = self.home_dir + '/output'
    try:
        self.cluster.fs.mkdir(self.home_dir + "/jt-test_failed_jobs")
        self.cluster.fs.mkdir(INPUT_DIR)
        self.cluster.fs.rmtree(OUTPUT_DIR)
    except:
        # Best-effort setup: rmtree probably failed here (OUTPUT_DIR absent).
        pass
    response = self.client.post('/jobsub/new_design/mapreduce', {
        'wf-name': ['test_failed_jobs-1'],
        'wf-description': ['description test_failed_jobs-1'],
        'action-args': [''],
        'action-jar_path': ['/user/hue/jobsub/examples/hadoop-examples.jar'],
        'action-archives': ['[]'],
        'action-job_properties': ['[{"name":"mapred.input.dir","value":"%s"},\
{"name":"mapred.output.dir","value":"%s"},\
{"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]' % (INPUT_DIR, OUTPUT_DIR)],
        'action-files': ['[]']}, follow=True)
    designs = json.loads(response.context['designs'])
    # Submit the job
    design_id = designs[0]['id']
    response = self.client.post("/jobsub/submit_design/%d" % design_id, follow=True)
    oozie_jobid = response.context['jobid']
    # NOTE(review): timeout is 500s here vs 120s in the other examples —
    # presumably to allow for the retries of a failing job; confirm.
    OozieServerProvider.wait_until_completion(oozie_jobid, timeout=500, step=1)
    hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
    hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
    # Select only killed jobs (should be absent)
    # Taking advantage of the fact new jobs are at the top of the list!
    response = self.client.get('/jobbrowser/jobs/?state=killed')
    assert_false(hadoop_job_id_short in response.content)
    # Select only failed jobs (should be present)
    # Map job should succeed. Reduce job should fail.
    response = self.client.get('/jobbrowser/jobs/?state=failed')
    assert_true(hadoop_job_id_short in response.content)
    # The single job view should have the failed task table
    response = self.client.get('/jobbrowser/jobs/%s' % (hadoop_job_id,))
    html = response.content.lower()
    assert_true('failed task' in html)
    # The map task should say success (empty input)
    map_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, map_task_id))
    assert_true('succeed' in response.content)
    assert_true('failed' not in response.content)
    # The reduce task should say failed
    reduce_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, reduce_task_id))
    assert_true('succeed' not in response.content)
    assert_true('failed' in response.content)
    # Selecting by failed state should include only the failed reduce
    # (the map succeeded, so m_000000 must not be listed).
    response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id,))
    assert_true('r_000000' in response.content)
    assert_true('m_000000' not in response.content)
示例6: test_jobsub_setup_and_run_samples
# 需要导入模块: from liboozie.oozie_api_test import OozieServerProvider [as 别名]
# 或者: from liboozie.oozie_api_test.OozieServerProvider import wait_until_completion [as 别名]
def test_jobsub_setup_and_run_samples(self):
    """
    Merely exercises jobsub_setup, and then runs the sleep example.
    """
    # Install the sample designs once, if not already present.
    if not jobsub_setup.Command().has_been_setup():
        jobsub_setup.Command().handle()
    self.cluster.fs.setuser('jobsub_test')
    # jobsub_setup installs 3 sample designs owned by 'sample':
    # 2 MapReduce actions and 1 streaming action.
    assert_equal(3, OozieDesign.objects.filter(owner__username='sample').count())
    assert_equal(2, OozieMapreduceAction.objects.filter(ooziedesign__owner__username='sample').count())
    assert_equal(1, OozieStreamingAction.objects.filter(ooziedesign__owner__username='sample').count())
    # Make sure sample user got created.
    assert_equal(1, User.objects.filter(username='sample').count())
    # Clone design
    assert_equal(0, OozieDesign.objects.filter(owner__username='jobsub_test').count())
    jobid = OozieDesign.objects.get(name='sleep_job', owner__username='sample').id
    self.client.post('/jobsub/clone_design/%d' % jobid)
    assert_equal(1, OozieDesign.objects.filter(owner__username='jobsub_test').count())
    jobid = OozieDesign.objects.get(owner__username='jobsub_test').id
    # And now submit and run the sleep sample
    response = self.client.post('/jobsub/submit_design/%d' % jobid, {
        'num_reduces': 1,
        'num_maps': 1,
        'map_sleep_time': 1,
        'reduce_sleep_time': 1}, follow=True)
    # The submission page should show an Oozie status and the design id.
    assert_true(sum([status in response.content for status in ('PREP', 'OK', 'DONE')]) > 0)
    assert_true(str(jobid) in response.content)
    oozie_job_id = response.context['jobid']
    # Wait for completion, then check the final status against the Oozie log.
    job = OozieServerProvider.wait_until_completion(oozie_job_id, timeout=120, step=1)
    logs = OozieServerProvider.oozie.get_job_log(oozie_job_id)
    assert_equal('SUCCEEDED', job.status, logs)
    # Grep
    n = OozieDesign.objects.filter(owner__username='jobsub_test').count()
    jobid = OozieDesign.objects.get(name='grep_example').id
    self.client.post('/jobsub/clone_design/%d' % jobid)
    assert_equal(n + 1, OozieDesign.objects.filter(owner__username='jobsub_test').count())
    # NOTE(review): despite cloning grep_example just above, this fetches the
    # sleep_job clone (name__contains='sleep_job') and re-runs it with sleep
    # parameters — confirm whether the grep clone was meant to be submitted.
    jobid = OozieDesign.objects.get(owner__username='jobsub_test', name__contains='sleep_job').id
    # And now submit and run the sleep sample
    response = self.client.post('/jobsub/submit_design/%d' % jobid, {
        'num_reduces': 1,
        'num_maps': 1,
        'map_sleep_time': 1,
        'reduce_sleep_time': 1}, follow=True)
    assert_true(sum([status in response.content for status in ('PREP', 'OK', 'DONE')]) > 0)
    assert_true(str(jobid) in response.content)
    oozie_job_id = response.context['jobid']
    # Shorter timeout than the first run; same success check via the job log.
    job = OozieServerProvider.wait_until_completion(oozie_job_id, timeout=60, step=1)
    logs = OozieServerProvider.oozie.get_job_log(oozie_job_id)
    assert_equal('SUCCEEDED', job.status, logs)