本文整理汇总了Python中sahara.utils.edp.compare_job_type函数的典型用法代码示例。如果您正苦于以下问题:Python compare_job_type函数的具体用法?Python compare_job_type怎么用?Python compare_job_type使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了compare_job_type函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _create_job_binary
def _create_job_binary(id, type):
    """Build a mock job binary whose filename matches the job family.

    Pig jobs get a .pig script, MapReduce/Java jobs a jar, and anything
    else (e.g. Hive) falls through to a .q script.
    """
    jb = mock.Mock()
    jb.id = id
    jb.url = "internal-db://42"
    if edp.compare_job_type(type, edp.JOB_TYPE_PIG):
        jb.name = "script.pig"
    elif edp.compare_job_type(type, edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_JAVA):
        jb.name = "main.jar"
    else:
        jb.name = "script.q"
    return jb
示例2: _create_job_binary
def _create_job_binary(id, type):
    """Build a mock job binary named for the given (string) job type.

    'Pig' gets a .pig script, 'MapReduce'/'Java' a jar; everything else
    defaults to a Hive-style .q script.
    """
    jb = mock.Mock()
    jb.id = id
    jb.url = "internal-db://42"
    if edp.compare_job_type(type, 'Pig'):
        jb.name = "script.pig"
    elif edp.compare_job_type(type, 'MapReduce', 'Java'):
        jb.name = "main.jar"
    else:
        jb.name = "script.q"
    return jb
示例3: get_possible_job_config
def get_possible_job_config(job_type):
    """Return possible job configs for *job_type* on vanilla 2.6.0.

    Hive, MapReduce (incl. streaming) and Pig are answered from bundled
    default XML resources; any other type is delegated to the generic
    Oozie EDP engine.
    """
    if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        hive_cfg = ch_helper.get_possible_hive_config_from(
            'plugins/vanilla/v2_6_0/resources/hive-default.xml')
        return {'job_config': hive_cfg}
    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE,
                            edp.JOB_TYPE_MAPREDUCE_STREAMING):
        mr_cfg = ch_helper.get_possible_mapreduce_config_from(
            'plugins/vanilla/v2_6_0/resources/mapred-default.xml')
        return {'job_config': mr_cfg}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
        pig_cfg = ch_helper.get_possible_pig_config_from(
            'plugins/vanilla/v2_6_0/resources/mapred-default.xml')
        return {'job_config': pig_cfg}
    # Unknown type: fall back to the generic engine's answer.
    return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
示例4: get_possible_job_config
def get_possible_job_config(job_type):
    """Return possible job configs for *job_type* on CDH 5.4.0.

    Hive, MapReduce (incl. streaming) and Pig are answered from the
    plugin's bundled site XML files; anything else is delegated to the
    Oozie job engine.
    """
    if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        hive_cfg = ch_helper.get_possible_hive_config_from(
            'plugins/cdh/v5_4_0/resources/hive-site.xml')
        return {'job_config': hive_cfg}
    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE,
                            edp.JOB_TYPE_MAPREDUCE_STREAMING):
        mr_cfg = ch_helper.get_possible_mapreduce_config_from(
            'plugins/cdh/v5_4_0/resources/mapred-site.xml')
        return {'job_config': mr_cfg}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
        pig_cfg = ch_helper.get_possible_pig_config_from(
            'plugins/cdh/v5_4_0/resources/mapred-site.xml')
        return {'job_config': pig_cfg}
    # Unknown type: fall back to the engine's generic answer.
    return edp_engine.OozieJobEngine.get_possible_job_config(job_type)
示例5: get_possible_job_config
def get_possible_job_config(job_type):
    """Return the default EDP config skeleton for *job_type*.

    Unknown types yield None; Java and Shell jobs get empty skeletons and
    return immediately.  MapReduce/Pig and Hive jobs are seeded from the
    vanilla 1.2.1 default XML files, then 'params' is added for Pig/Hive
    and 'args' for Pig.
    """
    # Reject anything that is not a recognized EDP job type.
    if not edp.compare_job_type(job_type, *edp.JOB_TYPES_ALL):
        return None
    if edp.compare_job_type(job_type, edp.JOB_TYPE_JAVA):
        return {'job_config': {'configs': [], 'args': []}}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_SHELL):
        return {'job_config': {'configs': [], 'params': {}, 'args': []}}
    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_PIG):
        # TODO(nmakhotkin): Here we need return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
        # Plain MapReduce jobs get extra MapReduce-specific configs on top.
        if edp.compare_job_type(job_type, edp.JOB_TYPE_MAPREDUCE):
            cfg += get_possible_mapreduce_configs()
    elif edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        # TODO(nmakhotkin): Here we need return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/hive-default.xml')
    # NOTE(review): if a type in JOB_TYPES_ALL reaches this point without
    # matching MapReduce/Pig/Hive above, 'cfg' is unbound and this raises
    # NameError — presumably the early returns cover all other types; verify.
    config = {'configs': cfg}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE):
        config.update({'params': {}})
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
        config.update({'args': []})
    return {'job_config': config}
示例6: get_possible_job_config
def get_possible_job_config(job_type):
    """Return possible job configs for *job_type* on HDP 1.3.2.

    Hive, MapReduce (incl. streaming) and Pig are all answered from the
    plugin's Ambari config resource; other types are delegated to the
    generic Oozie EDP engine.
    """
    if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        hive_cfg = ch_helper.get_possible_hive_config_from(
            'plugins/hdp/versions/version_1_3_2/resources/'
            'ambari-config-resource.json')
        return {'job_config': hive_cfg}
    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE,
                            edp.JOB_TYPE_MAPREDUCE_STREAMING):
        mr_cfg = ch_helper.get_possible_mapreduce_config_from(
            'plugins/hdp/versions/version_1_3_2/resources/'
            'ambari-config-resource.json')
        return {'job_config': mr_cfg}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
        pig_cfg = ch_helper.get_possible_pig_config_from(
            'plugins/hdp/versions/version_1_3_2/resources/'
            'ambari-config-resource.json')
        return {'job_config': pig_cfg}
    # Anything else: generic engine defaults.
    return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
示例7: get_data_sources
def get_data_sources(job_execution, job):
    """Look up the (input, output) data sources of a job execution.

    Java jobs carry no data sources, so (None, None) is returned for them.
    """
    if edp.compare_job_type(job.type, edp.JOB_TYPE_JAVA):
        return None, None
    current_ctx = context.ctx()
    src = conductor.data_source_get(current_ctx, job_execution.input_id)
    dst = conductor.data_source_get(current_ctx, job_execution.output_id)
    return src, dst
示例8: _create_job_exec
def _create_job_exec(job_id, type, configs=None):
    """Build a mock job execution; Java jobs get main-class/opts configs.

    NOTE(review): for Java jobs this indexes configs['configs'], so the
    caller is presumably expected to pass a dict with that key — verify.
    """
    execution = mock.Mock()
    execution.job_id = job_id
    execution.job_configs = configs
    if edp.compare_job_type(type, edp.JOB_TYPE_JAVA):
        java_cfg = execution.job_configs['configs']
        java_cfg['edp.java.main_class'] = _java_main_class
        java_cfg['edp.java.java_opts'] = _java_opts
    return execution
示例9: get_possible_job_config
def get_possible_job_config(job_type):
    """Return the default EDP config skeleton for *job_type*.

    Unknown types yield None; Java and Shell jobs get empty skeletons and
    return immediately.  MapReduce/Pig and Hive jobs are seeded from the
    vanilla 1.2.1 default XML files, then 'params' is added for Pig and
    Hive jobs only.
    """
    if not edp.compare_job_type(job_type, *edp.JOB_TYPES_ALL):
        return None
    if edp.compare_job_type(job_type, edp.JOB_TYPE_JAVA):
        return {'job_config': {'configs': [], 'args': []}}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_SHELL):
        return {'job_config': {'configs': [], 'params': [], 'args': []}}
    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_PIG):
        # TODO(nmakhotkin): Here we need return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
        # Plain MapReduce gets the extra job-config defaults on top.
        if edp.compare_job_type(job_type, edp.JOB_TYPE_MAPREDUCE):
            cfg += xmlutils.load_hadoop_xml_defaults(
                'service/edp/resources/mapred-job-config.xml')
    elif edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        # TODO(nmakhotkin): Here we need return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/hive-default.xml')
    # TODO(tmckay): args should be a list when bug #269968
    # is fixed on the UI side
    config = {'configs': cfg, "args": {}}
    # Bug fix: the original compared the constants JOB_TYPE_MAPREDUCE and
    # JOB_TYPE_JAVA to each other, ignoring job_type, so 'params' was added
    # unconditionally.  Only Pig and Hive jobs take params (mirrors the
    # sibling implementation of this function).
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE):
        config.update({'params': {}})
    return {'job_config': config}
示例10: run_job
def run_job(job_execution):
    """Submit an EDP job execution to the cluster's Oozie server.

    Resolves the cluster and job, uploads the job files and generated
    workflow XML to HDFS, submits the workflow to Oozie, records the Oozie
    job id and start time on the job execution, and starts the Oozie job.
    Returns the (possibly updated) job execution.
    """
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
    # Only submit on an active cluster; otherwise return unchanged.
    if cluster.status != 'Active':
        return job_execution
    job = conductor.job_get(ctx, job_execution.job_id)
    # Java jobs have no input/output data sources.
    if not edp.compare_job_type(job.type, edp.JOB_TYPE_JAVA):
        input_source = conductor.data_source_get(ctx, job_execution.input_id)
        output_source = conductor.data_source_get(ctx, job_execution.output_id)
    else:
        input_source = None
        output_source = None
    #TODO(nprivalova): should be removed after all features implemented
    validate(input_source, output_source, job)
    # Any HDFS-backed data source may require extra cluster configuration.
    for data_source in [input_source, output_source]:
        if data_source and data_source.type == 'hdfs':
            h.configure_cluster_for_hdfs(cluster, data_source)
    hdfs_user = _get_hdfs_user(cluster)
    oozie_server = _get_oozie_server(cluster)
    # Stage the job: workflow dir, job binaries, then the workflow XML.
    wf_dir = create_workflow_dir(oozie_server, job, hdfs_user)
    upload_job_files(oozie_server, wf_dir, job, hdfs_user)
    creator = workflow_factory.get_creator(job)
    wf_xml = creator.get_workflow_xml(cluster, job_execution,
                                      input_source, output_source)
    path_to_workflow = upload_workflow_file(oozie_server,
                                            wf_dir, wf_xml, hdfs_user)
    rm_path = _get_resource_manager_path(cluster)
    nn_path = cluster['info']['HDFS']['NameNode']
    client = o.OozieClient(cluster['info']['JobFlow']['Oozie'] + "/oozie/",
                           _get_oozie_server(cluster))
    job_parameters = {"jobTracker": rm_path,
                      "nameNode": nn_path,
                      "user.name": hdfs_user,
                      "oozie.wf.application.path":
                      "%s%s" % (nn_path, path_to_workflow),
                      "oozie.use.system.libpath": "true"}
    # Submit first, record the Oozie id and start time, then start the job.
    oozie_job_id = client.add_job(x.create_hadoop_xml(job_parameters),
                                  job_execution)
    job_execution = conductor.job_execution_update(ctx, job_execution,
                                                   {'oozie_job_id':
                                                    oozie_job_id,
                                                    'start_time':
                                                    datetime.datetime.now()})
    client.run_job(job_execution, oozie_job_id)
    return job_execution
示例11: test_compare_job_type
def test_compare_job_type(self):
    """compare_job_type: strict needs an exact match; non-strict matches
    a streaming type against its base MapReduce type, but not vice versa.
    """
    cases = [
        # (expected, job_type, candidate types, strict)
        (True, edp.JOB_TYPE_JAVA,
         (edp.JOB_TYPE_JAVA, edp.JOB_TYPE_MAPREDUCE), True),
        (False, edp.JOB_TYPE_MAPREDUCE_STREAMING,
         (edp.JOB_TYPE_JAVA, edp.JOB_TYPE_MAPREDUCE), True),
        (True, edp.JOB_TYPE_MAPREDUCE_STREAMING,
         (edp.JOB_TYPE_JAVA, edp.JOB_TYPE_MAPREDUCE), False),
        (False, edp.JOB_TYPE_MAPREDUCE,
         (edp.JOB_TYPE_JAVA, edp.JOB_TYPE_MAPREDUCE_STREAMING), False),
    ]
    for expected, job_type, candidates, strict in cases:
        result = edp.compare_job_type(job_type, *candidates, strict=strict)
        if expected:
            self.assertTrue(result)
        else:
            self.assertFalse(result)
示例12: _create_job
def _create_job(id, job_binary, type):
    """Build a mock job; Pig/Hive jobs use mains, all others use libs."""
    job = mock.Mock()
    job.id = id
    job.type = type
    job.name = 'special_name'
    job.mains = None
    job.libs = None
    if edp.compare_job_type(type, 'Pig', 'Hive'):
        job.mains = [job_binary]
    else:
        job.libs = [job_binary]
    return job
示例13: _create_job
def _create_job(id, job_binary, type):
    """Build a mock job; Pig/Hive jobs use mains, all others use libs."""
    job = mock.Mock()
    job.id = id
    job.type = type
    job.name = 'special_name'
    job.mains = None
    job.libs = None
    if edp.compare_job_type(type, edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE):
        job.mains = [job_binary]
    else:
        job.libs = [job_binary]
    return job
示例14: _create_job_exec
def _create_job_exec(job_id, type, configs=None, info=None):
    """Build a mock job execution with a fresh random id.

    NOTE(review): when configs is falsy it is replaced with {}, but a Java
    job then indexes ['configs'] on that empty dict and raises KeyError —
    presumably callers always pass configs for Java jobs; verify.
    """
    execution = mock.Mock()
    execution.id = six.text_type(uuid.uuid4())
    execution.job_id = job_id
    execution.job_configs = configs
    execution.info = info
    if not execution.job_configs:
        execution.job_configs = {}
    if edp.compare_job_type(type, edp.JOB_TYPE_JAVA):
        java_cfg = execution.job_configs['configs']
        java_cfg['edp.java.main_class'] = _java_main_class
        java_cfg['edp.java.java_opts'] = _java_opts
    return execution
示例15: get_possible_job_config
def get_possible_job_config(job_type):
    """Return the default EDP config skeleton for *job_type*.

    Unsupported types yield None; Java jobs get an empty skeleton and
    return immediately.  MapReduce/Pig and Hive jobs are seeded from the
    vanilla 1.2.1 default XML files, then 'params' is added for Pig and
    Hive jobs only.
    """
    if not edp.compare_job_type(job_type, *get_possible_job_types()):
        return None
    if edp.compare_job_type(job_type, 'Java'):
        return {'job_config': {'configs': [], 'args': []}}
    if edp.compare_job_type(job_type, 'MapReduce', 'Pig'):
        #TODO(nmakhotkin) Here we should return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
        # Plain MapReduce gets the extra job-config defaults on top.
        if edp.compare_job_type(job_type, 'MapReduce'):
            cfg += xmlutils.load_hadoop_xml_defaults(
                'service/edp/resources/mapred-job-config.xml')
    elif edp.compare_job_type(job_type, 'Hive'):
        #TODO(nmakhotkin) Here we should return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/hive-default.xml')
    # TODO(tmckay): args should be a list when bug #269968
    # is fixed on the UI side
    config = {'configs': cfg, "args": {}}
    # Bug fix: the original compared the constants 'MapReduce' and 'Java'
    # to each other, ignoring job_type, so 'params' was added
    # unconditionally.  Only Pig and Hive jobs take params.
    if edp.compare_job_type(job_type, 'Pig', 'Hive'):
        config.update({'params': {}})
    return {'job_config': config}