本文整理汇总了Python中liboozie.oozie_api_tests.OozieServerProvider.wait_until_completion方法的典型用法代码示例。如果您正苦于以下问题:Python OozieServerProvider.wait_until_completion方法的具体用法?Python OozieServerProvider.wait_until_completion怎么用?Python OozieServerProvider.wait_until_completion使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类liboozie.oozie_api_tests.OozieServerProvider的用法示例。
在下文中一共展示了OozieServerProvider.wait_until_completion方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setup_class
# Required import: from liboozie.oozie_api_tests import OozieServerProvider [as alias]
# Or: from liboozie.oozie_api_tests.OozieServerProvider import wait_until_completion [as alias]
def setup_class(cls):
    """One-time class setup: start the Oozie test server, create and
    impersonate a dedicated test user, install the examples, then submit
    the sleep workflow and wait for it to finish so the class has a
    completed Hadoop job id to inspect in the tests."""
    OozieServerProvider.setup_class()

    # Dedicated user with a home directory on the test cluster.
    cls.username = 'hue_jobbrowser_test'
    cls.home_dir = '/user/%s' % cls.username
    cls.cluster.fs.do_as_user(cls.username, cls.cluster.fs.create_home_dir, cls.home_dir)

    cls.client = make_logged_in_client(username=cls.username, is_superuser=False, groupname='test')
    cls.user = User.objects.get(username=cls.username)
    for app in ('jobsub', 'jobbrowser', 'oozie'):
        grant_access(cls.username, 'test', app)
    add_to_group(cls.username)

    # Remember the previous filesystem user so teardown can restore it,
    # then act as the test user for the remaining setup.
    cls.prev_user = cls.cluster.fs.user
    cls.cluster.fs.setuser(cls.username)

    cls.install_examples()
    cls.design = cls.create_design()

    # Run the sleep example, since it doesn't require user home directory
    submit_url = reverse('oozie:submit_workflow', args=[cls.design.id])
    workflow_params = {u'form-MAX_NUM_FORMS': [u''],
                       u'form-INITIAL_FORMS': [u'1'],
                       u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                       u'form-0-value': [u'1'],
                       u'form-TOTAL_FORMS': [u'1']}
    response = cls.client.post(submit_url, data=workflow_params, follow=True)

    oozie_jobid = response.context['oozie_workflow'].id
    OozieServerProvider.wait_until_completion(oozie_jobid)
    cls.hadoop_job_id = get_hadoop_job_id(cls.oozie, oozie_jobid, 1)
    cls.hadoop_job_id_short = views.get_shorter_id(cls.hadoop_job_id)
示例2: setUp
# Required import: from liboozie.oozie_api_tests import OozieServerProvider [as alias]
# Or: from liboozie.oozie_api_tests.OozieServerProvider import wait_until_completion [as alias]
def setUp(self):
    """
    To clean: creating test1, test2, test3...users

    Per-test setup: each run creates a fresh, numbered user (test1, test2,
    ...) with its own home directory and app permissions, then impersonates
    it on the cluster filesystem.
    """
    TestJobBrowserWithHadoop.user_count += 1
    self.username = 'test' + str(TestJobBrowserWithHadoop.user_count)
    self.home_dir = '/user/%s' % self.username
    self.cluster.fs.do_as_user(self.username, self.cluster.fs.create_home_dir, self.home_dir)
    self.client = make_logged_in_client(username=self.username, is_superuser=False, groupname='test')
    self.user = User.objects.get(username=self.username)
    grant_access(self.username, 'test', 'jobsub')
    grant_access(self.username, 'test', 'jobbrowser')
    grant_access(self.username, 'test', 'oozie')
    add_to_group(self.username)
    # Remember the previous filesystem user so it can be restored later,
    # then act as the test user.
    self.prev_user = self.cluster.fs.user
    self.cluster.fs.setuser(self.username)
    self.install_examples()
    self.design = self.create_design()
    # NOTE(review): this deliberately short-circuits setUp, so everything
    # below is unreachable (the test appears to be disabled).
    raise SkipTest
    # Run the sleep example, since it doesn't require user home directory
    design_id = self.design.id
    response = self.client.post(reverse('oozie:submit_workflow',
                                        args=[design_id]),
                                data={u'form-MAX_NUM_FORMS': [u''],
                                      u'form-INITIAL_FORMS': [u'1'],
                                      u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                                      u'form-0-value': [u'1'],
                                      u'form-TOTAL_FORMS': [u'1']},
                                follow=True)
    oozie_jobid = response.context['oozie_workflow'].id
    # Poll every second, for up to 120 seconds, for the workflow to finish.
    OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
    self.hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
    self.hadoop_job_id_short = views.get_shorter_id(self.hadoop_job_id)
示例3: setup_class
# Required import: from liboozie.oozie_api_tests import OozieServerProvider [as alias]
# Or: from liboozie.oozie_api_tests.OozieServerProvider import wait_until_completion [as alias]
def setup_class(cls):
    """One-time class setup: start the Oozie test server, create and
    impersonate a test user, install the examples, then submit the sleep
    workflow and wait for it to complete so the class has a finished
    Hadoop job id available to the tests."""
    OozieServerProvider.setup_class()
    cls.username = "hue_jobbrowser_test"
    cls.home_dir = "/user/%s" % cls.username
    cls.cluster.fs.do_as_user(cls.username, cls.cluster.fs.create_home_dir, cls.home_dir)
    cls.client = make_logged_in_client(username=cls.username, is_superuser=False, groupname="test")
    cls.user = User.objects.get(username=cls.username)
    grant_access(cls.username, "test", "jobsub")
    grant_access(cls.username, "test", "jobbrowser")
    grant_access(cls.username, "test", "oozie")
    add_to_group(cls.username)
    # Remember the previous filesystem user so teardown can restore it,
    # then act as the test user for the rest of the setup.
    cls.prev_user = cls.cluster.fs.user
    cls.cluster.fs.setuser(cls.username)
    cls.install_examples()
    cls.design = cls.create_design()
    # Run the sleep example, since it doesn't require user home directory
    design_id = cls.design.id
    response = cls.client.post(
        reverse("oozie:submit_workflow", args=[design_id]),
        data={
            u"form-MAX_NUM_FORMS": [u""],
            u"form-INITIAL_FORMS": [u"1"],
            u"form-0-name": [u"REDUCER_SLEEP_TIME"],
            u"form-0-value": [u"1"],
            u"form-TOTAL_FORMS": [u"1"],
        },
        follow=True,
    )
    oozie_jobid = response.context["oozie_workflow"].id
    # Block until the workflow reaches a terminal state.
    OozieServerProvider.wait_until_completion(oozie_jobid)
    cls.hadoop_job_id = get_hadoop_job_id(cls.oozie, oozie_jobid, 1)
    cls.hadoop_job_id_short = views.get_shorter_id(cls.hadoop_job_id)
示例4: test_failed_jobs
# Required import: from liboozie.oozie_api_tests import OozieServerProvider [as alias]
# Or: from liboozie.oozie_api_tests.OozieServerProvider import wait_until_completion [as alias]
def test_failed_jobs(self):
    """
    Test jobs with genuine failure, not just killed.

    Submits a MapReduce design whose mapper/combiner/reducer classes do not
    exist, waits for the Oozie workflow to complete, then verifies the job
    appears under the 'failed' filter and not under 'killed'.
    """
    if is_live_cluster():
        raise SkipTest('HUE-2902: Skipping because test is not reentrant')

    # Create design that will fail because the script file isn't there
    INPUT_DIR = TestJobBrowserWithHadoop.home_dir + '/input'
    OUTPUT_DIR = TestJobBrowserWithHadoop.home_dir + '/output'
    try:
        TestJobBrowserWithHadoop.cluster.fs.mkdir(TestJobBrowserWithHadoop.home_dir + "/jt-test_failed_jobs")
        TestJobBrowserWithHadoop.cluster.fs.mkdir(INPUT_DIR)
        TestJobBrowserWithHadoop.cluster.fs.rmtree(OUTPUT_DIR)
    except Exception:
        # Narrowed from a bare `except:`: still best-effort (only logged),
        # but no longer swallows SystemExit/KeyboardInterrupt.
        LOG.exception('failed to teardown tests')

    job_name = '%s_%s' % (TestJobBrowserWithHadoop.username, 'test_failed_jobs-1')
    response = TestJobBrowserWithHadoop.client.post(reverse('jobsub.views.new_design', kwargs={'node_type': 'mapreduce'}), {
        'name': [job_name],
        'description': ['description test_failed_jobs-1'],
        'args': '',
        'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
        'prepares': '[]',
        'archives': '[]',
        'files': '[]',
        'job_properties': ['[{"name":"mapred.input.dir","value":"%s"},\
            {"name":"mapred.output.dir","value":"%s"},\
            {"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
            {"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
            {"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]' % (INPUT_DIR, OUTPUT_DIR)]
    }, HTTP_X_REQUESTED_WITH='XMLHttpRequest', follow=True)

    # Submit the job
    design_dict = json.loads(response.content)
    design_id = int(design_dict['id'])
    response = TestJobBrowserWithHadoop.client.post(reverse('oozie:submit_workflow',
                                                            args=[design_id]),
                                                    data={u'form-MAX_NUM_FORMS': [u''],
                                                          u'form-INITIAL_FORMS': [u'1'],
                                                          u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                                                          u'form-0-value': [u'1'],
                                                          u'form-TOTAL_FORMS': [u'1']},
                                                    follow=True)
    oozie_jobid = response.context['oozie_workflow'].id
    # Block until the workflow finishes; the return value was never used,
    # so the unused `job` local has been removed.
    OozieServerProvider.wait_until_completion(oozie_jobid)
    hadoop_job_id = get_hadoop_job_id(TestJobBrowserWithHadoop.oozie, oozie_jobid, 1)
    hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)

    # Select only killed jobs (should be absent)
    # Taking advantage of the fact new jobs are at the top of the list!
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&state=killed')
    assert_false(hadoop_job_id_short in response.content)

    # Select only failed jobs (should be present)
    # Map job should succeed. Reduce job should fail.
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&state=failed')
    assert_true(hadoop_job_id_short in response.content)

    raise SkipTest # Not compatible with MR2
    # NOTE: everything below is unreachable dead code, kept for when MR2
    # compatibility is restored.

    # The single job view should have the failed task table
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s' % (hadoop_job_id,))
    html = response.content.lower()
    assert_true('failed task' in html, html)

    # The map task should say success (empty input)
    # Fix: use the local hadoop_job_id of THIS submission (was
    # TestJobBrowserWithHadoop.hadoop_job_id, the class-setup job —
    # inconsistent with the reduce-task lookup below).
    map_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, map_task_id))
    assert_true('succeed' in response.content)
    assert_true('failed' not in response.content)

    # The reduce task should say failed
    reduce_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, reduce_task_id))
    assert_true('succeed' not in response.content)
    assert_true('failed' in response.content)

    # Selecting by failed state should include the failed reduce but not the
    # succeeded map (comment corrected: the assertions check r_000000).
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id,))
    assert_true('r_000000' in response.content)
    assert_true('m_000000' not in response.content)
示例5: test_failed_jobs
# Required import: from liboozie.oozie_api_tests import OozieServerProvider [as alias]
# Or: from liboozie.oozie_api_tests.OozieServerProvider import wait_until_completion [as alias]
def test_failed_jobs(self):
    """
    Test jobs with genuine failure, not just killed.

    Submits a MapReduce design whose mapper/combiner/reducer classes do not
    exist, waits for the Oozie workflow to complete, then verifies the job
    appears under the 'failed' filter and not under 'killed'.
    """
    if is_live_cluster():
        raise SkipTest("HUE-2902: Skipping because test is not reentrant")

    # Create design that will fail because the script file isn't there
    INPUT_DIR = TestJobBrowserWithHadoop.home_dir + "/input"
    OUTPUT_DIR = TestJobBrowserWithHadoop.home_dir + "/output"
    try:
        TestJobBrowserWithHadoop.cluster.fs.mkdir(TestJobBrowserWithHadoop.home_dir + "/jt-test_failed_jobs")
        TestJobBrowserWithHadoop.cluster.fs.mkdir(INPUT_DIR)
        TestJobBrowserWithHadoop.cluster.fs.rmtree(OUTPUT_DIR)
    except Exception:
        # Narrowed from a bare `except:`: still best-effort (only logged),
        # but no longer swallows SystemExit/KeyboardInterrupt.
        LOG.exception("failed to teardown tests")

    job_name = "%s_%s" % (TestJobBrowserWithHadoop.username, "test_failed_jobs-1")
    response = TestJobBrowserWithHadoop.client.post(
        reverse("jobsub.views.new_design", kwargs={"node_type": "mapreduce"}),
        {
            "name": [job_name],
            "description": ["description test_failed_jobs-1"],
            "args": "",
            "jar_path": "/user/hue/oozie/workspaces/lib/hadoop-examples.jar",
            "prepares": "[]",
            "archives": "[]",
            "files": "[]",
            "job_properties": [
                '[{"name":"mapred.input.dir","value":"%s"},\
                {"name":"mapred.output.dir","value":"%s"},\
                {"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
                {"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
                {"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]'
                % (INPUT_DIR, OUTPUT_DIR)
            ],
        },
        HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        follow=True,
    )

    # Submit the job
    design_dict = json.loads(response.content)
    design_id = int(design_dict["id"])
    response = TestJobBrowserWithHadoop.client.post(
        reverse("oozie:submit_workflow", args=[design_id]),
        data={
            u"form-MAX_NUM_FORMS": [u""],
            u"form-INITIAL_FORMS": [u"1"],
            u"form-0-name": [u"REDUCER_SLEEP_TIME"],
            u"form-0-value": [u"1"],
            u"form-TOTAL_FORMS": [u"1"],
        },
        follow=True,
    )
    oozie_jobid = response.context["oozie_workflow"].id
    # Block until the workflow finishes; the return value was never used,
    # so the unused `job` local has been removed.
    OozieServerProvider.wait_until_completion(oozie_jobid)
    hadoop_job_id = get_hadoop_job_id(TestJobBrowserWithHadoop.oozie, oozie_jobid, 1)
    hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)

    # Select only killed jobs (should be absent)
    # Taking advantage of the fact new jobs are at the top of the list!
    response = TestJobBrowserWithHadoop.client.get("/jobbrowser/jobs/?format=json&state=killed")
    assert_false(hadoop_job_id_short in response.content)

    # Select only failed jobs (should be present)
    # Map job should succeed. Reduce job should fail.
    response = TestJobBrowserWithHadoop.client.get("/jobbrowser/jobs/?format=json&state=failed")
    assert_true(hadoop_job_id_short in response.content)

    raise SkipTest  # Not compatible with MR2
    # NOTE: everything below is unreachable dead code, kept for when MR2
    # compatibility is restored.

    # The single job view should have the failed task table
    response = TestJobBrowserWithHadoop.client.get("/jobbrowser/jobs/%s" % (hadoop_job_id,))
    html = response.content.lower()
    assert_true("failed task" in html, html)

    # The map task should say success (empty input)
    # Fix: use the local hadoop_job_id of THIS submission (was
    # TestJobBrowserWithHadoop.hadoop_job_id, the class-setup job —
    # inconsistent with the reduce-task lookup below).
    map_task_id = hadoop_job_id.replace("job", "task") + "_m_000000"
    response = TestJobBrowserWithHadoop.client.get("/jobbrowser/jobs/%s/tasks/%s" % (hadoop_job_id, map_task_id))
    assert_true("succeed" in response.content)
    assert_true("failed" not in response.content)

    # The reduce task should say failed
    reduce_task_id = hadoop_job_id.replace("job", "task") + "_r_000000"
    response = TestJobBrowserWithHadoop.client.get("/jobbrowser/jobs/%s/tasks/%s" % (hadoop_job_id, reduce_task_id))
    assert_true("succeed" not in response.content)
    assert_true("failed" in response.content)

    # Selecting by failed state should include the failed reduce but not the
    # succeeded map (comment corrected: the assertions check r_000000).
    response = TestJobBrowserWithHadoop.client.get("/jobbrowser/jobs/%s/tasks?taskstate=failed" % (hadoop_job_id,))
    assert_true("r_000000" in response.content)
    assert_true("m_000000" not in response.content)