This article collects typical usage examples of the Python method airflow.contrib.hooks.spark_submit_hook.SparkSubmitHook.submit. If you are unsure what SparkSubmitHook.submit does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, airflow.contrib.hooks.spark_submit_hook.SparkSubmitHook.
Five code examples of SparkSubmitHook.submit are shown below, ordered by popularity by default.
Example 1: test_yarn_process_on_kill
# Required import: from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
# Or: from airflow.contrib.hooks.spark_submit_hook.SparkSubmitHook import submit
def test_yarn_process_on_kill(self, mock_popen):
    # Given
    mock_popen.return_value.stdout = six.StringIO('stdout')
    mock_popen.return_value.stderr = six.StringIO('stderr')
    mock_popen.return_value.poll.return_value = None
    mock_popen.return_value.wait.return_value = 0
    log_lines = [
        'SPARK_MAJOR_VERSION is set to 2, using Spark2',
        'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
        'platform... using builtin-java classes where applicable',
        'WARN DomainSocketFactory: The short-circuit local reads feature cannot ' +
        'be used because libhadoop cannot be loaded.',
        'INFO Client: Requesting a new application from cluster with 10 ' +
        'NodeManagerapplication_1486558679801_1820s',
        'INFO Client: Submitting application application_1486558679801_1820 ' +
        'to ResourceManager'
    ]
    hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
    hook._process_spark_submit_log(log_lines)
    hook.submit()

    # When
    hook.on_kill()

    # Then
    self.assertIn(call(['yarn', 'application', '-kill',
                        'application_1486558679801_1820'],
                       stderr=-1, stdout=-1),
                  mock_popen.mock_calls)
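The test methods on this page receive a mock_popen argument, which implies that Popen is replaced by a mock.patch decorator in the surrounding test class so no real process is spawned. A minimal sketch of that scaffolding follows; the exact patch target and the unittest class name are assumptions about how the hook module is laid out, not something taken from this page.

import unittest
from unittest import mock

import six

from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook


class TestSparkSubmitHook(unittest.TestCase):
    # Assumed patch target: replace Popen where the hook's module looks it up,
    # so hook.submit() and hook.on_kill() never start a real subprocess.
    @mock.patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
    def test_yarn_process_on_kill(self, mock_popen):
        ...  # body as shown in Example 1 above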
Example 2: test_submit
# Required import: from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
# Or: from airflow.contrib.hooks.spark_submit_hook.SparkSubmitHook import submit
def test_submit(self, mock_process):
    # We don't have spark-submit available, and this is hard to mock, so let's
    # just use this simple mock.
    mock_Popen = mock_process.Popen.return_value
    mock_Popen.stdout = StringIO(u'stdout')
    mock_Popen.stderr = StringIO(u'stderr')
    mock_Popen.returncode = None
    mock_Popen.communicate.return_value = ['extra stdout', 'extra stderr']
    hook = SparkSubmitHook()
    hook.submit(self._spark_job_file)
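Example 2 references self._spark_job_file, a fixture presumably prepared in the test's setUp. A minimal sketch, where the file name is a hypothetical placeholder:

def setUp(self):
    # Hypothetical fixture: any path works here, since Popen is mocked and
    # spark-submit is never actually invoked.
    self._spark_job_file = 'test_application.py'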
Example 3: test_spark_process_runcmd
# Required import: from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
# Or: from airflow.contrib.hooks.spark_submit_hook.SparkSubmitHook import submit
def test_spark_process_runcmd(self, mock_popen):
    # Given
    mock_popen.return_value.stdout = StringIO(u'stdout')
    mock_popen.return_value.stderr = StringIO(u'stderr')
    mock_popen.return_value.wait.return_value = 0

    # When
    hook = SparkSubmitHook(conn_id='')
    hook.submit()

    # Then
    self.assertEqual(
        mock_popen.mock_calls[0],
        call(['spark-submit', '--master', 'yarn', '--name', 'default-name', ''],
             stdout=-1, stderr=-2))
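The integers in the expected call are simply the values of the subprocess pipe constants as recorded by the mock: subprocess.PIPE is -1 and subprocess.STDOUT is -2. A sketch of the same assertion written with the symbolic names, intended as a drop-in replacement for the final assertion of Example 3:

import subprocess

# stdout=-1 / stderr=-2 in the recorded call correspond to these constants.
assert subprocess.PIPE == -1 and subprocess.STDOUT == -2

self.assertEqual(
    mock_popen.mock_calls[0],
    call(['spark-submit', '--master', 'yarn', '--name', 'default-name', ''],
         stdout=subprocess.PIPE,     # capture stdout
         stderr=subprocess.STDOUT))  # merge stderr into stdout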
Example 4: test_k8s_process_on_kill
# Required import: from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
# Or: from airflow.contrib.hooks.spark_submit_hook.SparkSubmitHook import submit
def test_k8s_process_on_kill(self, mock_popen, mock_client_method):
    # Given
    mock_popen.return_value.stdout = six.StringIO('stdout')
    mock_popen.return_value.stderr = six.StringIO('stderr')
    mock_popen.return_value.poll.return_value = None
    mock_popen.return_value.wait.return_value = 0
    client = mock_client_method.return_value
    hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
    log_lines = [
        'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:' +
        'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
        'namespace: default' +
        'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,' +
        'spark-role -> driver' +
        'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42' +
        'creation time: 2018-03-05T10:26:55Z' +
        'service account name: spark' +
        'volumes: spark-init-properties, download-jars-volume,' +
        'download-files-volume, spark-token-2vmlm' +
        'node name: N/A' +
        'start time: N/A' +
        'container images: N/A' +
        'phase: Pending' +
        'status: []' +
        '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,' +
        ' new state:' +
        'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
        'namespace: default' +
        'Exit code: 0'
    ]
    hook._process_spark_submit_log(log_lines)
    hook.submit()

    # When
    hook.on_kill()

    # Then
    import kubernetes
    kwargs = {'pretty': True, 'body': kubernetes.client.V1DeleteOptions()}
    client.delete_namespaced_pod.assert_called_once_with(
        'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver',
        'mynamespace', **kwargs)
Example 5: SparkSubmitOperator
# Required import: from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
# Or: from airflow.contrib.hooks.spark_submit_hook.SparkSubmitHook import submit
class SparkSubmitOperator(BaseOperator):
    """
    This operator is a wrapper around the spark-submit binary to kick off a
    spark-submit job. It requires that the "spark-submit" binary is in the PATH
    or that spark-home is set in the extra on the connection.

    :param application: The application submitted as a job: either a jar or a py file.
    :type application: str
    :param conf: Arbitrary Spark configuration properties
    :type conf: dict
    :param conn_id: The connection id as configured in Airflow administration. When an
        invalid connection_id is supplied, it will default to yarn.
    :type conn_id: str
    :param files: Upload additional files to the container running the job, separated by a
        comma. For example hive-site.xml.
    :type files: str
    :param py_files: Additional python files used by the job, can be .zip, .egg or .py.
    :type py_files: str
    :param jars: Submit additional jars to upload and place them in executor classpath.
    :type jars: str
    :param driver_classpath: Additional, driver-specific, classpath settings.
    :type driver_classpath: str
    :param java_class: the main class of the Java application
    :type java_class: str
    :param packages: Comma-separated list of maven coordinates of jars to include on the
        driver and executor classpaths
    :type packages: str
    :param exclude_packages: Comma-separated list of maven coordinates of jars to exclude
        while resolving the dependencies provided in 'packages'
    :type exclude_packages: str
    :param repositories: Comma-separated list of additional remote repositories to search
        for the maven coordinates given with 'packages'
    :type repositories: str
    :param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
        (Default: all the available cores on the worker)
    :type total_executor_cores: int
    :param executor_cores: (Standalone & YARN only) Number of cores per executor (Default: 2)
    :type executor_cores: int
    :param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
    :type executor_memory: str
    :param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
    :type driver_memory: str
    :param keytab: Full path to the file that contains the keytab
    :type keytab: str
    :param principal: The name of the kerberos principal used for keytab
    :type principal: str
    :param name: Name of the job (default airflow-spark)
    :type name: str
    :param num_executors: Number of executors to launch
    :type num_executors: int
    :param application_args: Arguments for the application being submitted
    :type application_args: list
    :param verbose: Whether to pass the verbose flag to the spark-submit process for debugging
    :type verbose: bool
    """
    template_fields = ('_name', '_application_args', '_packages')
    ui_color = WEB_COLORS['LIGHTORANGE']

    @apply_defaults
    def __init__(self,
                 application='',
                 conf=None,
                 conn_id='spark_default',
                 files=None,
                 py_files=None,
                 driver_classpath=None,
                 jars=None,
                 java_class=None,
                 packages=None,
                 exclude_packages=None,
                 repositories=None,
                 total_executor_cores=None,
                 executor_cores=None,
                 executor_memory=None,
                 driver_memory=None,
                 keytab=None,
                 principal=None,
                 name='airflow-spark',
                 num_executors=None,
                 application_args=None,
                 verbose=False,
                 *args,
                 **kwargs):
        super(SparkSubmitOperator, self).__init__(*args, **kwargs)
        self._application = application
        self._conf = conf
        self._files = files
        self._py_files = py_files
        self._driver_classpath = driver_classpath
        self._jars = jars
        self._java_class = java_class
        self._packages = packages
        self._exclude_packages = exclude_packages
        self._repositories = repositories
        self._total_executor_cores = total_executor_cores
        self._executor_cores = executor_cores
        self._executor_memory = executor_memory
        self._driver_memory = driver_memory
        self._keytab = keytab
        self._principal = principal
        self._name = name
        self._num_executors = num_executors
        self._application_args = application_args
        self._verbose = verbose
    # ......... remaining code omitted .........
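For context, here is a minimal DAG sketch using the operator whose constructor is excerpted above; it calls SparkSubmitHook.submit internally when the task runs. The DAG id, schedule, and application path are placeholders, and the sketch assumes a 'spark_default' connection is configured in Airflow.

from datetime import datetime

from airflow import DAG
from airflow.contrib.operators.spark_submit_operator import SparkSubmitOperator

dag = DAG(
    dag_id='example_spark_submit',   # placeholder DAG id
    start_date=datetime(2018, 1, 1),
    schedule_interval=None,
)

submit_job = SparkSubmitOperator(
    task_id='submit_pi_job',
    application='/path/to/pi.py',    # placeholder application path
    conn_id='spark_default',         # assumes this connection exists
    name='airflow-spark',
    verbose=True,
    dag=dag,
)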