

Python SparkSession.builder Method Code Examples

This article collects typical usage examples of the pyspark.sql.SparkSession.builder method in Python. If you have been wondering what exactly SparkSession.builder does, how to use it, or what real-world usages look like, the curated code examples here should help. You can also explore further usage examples of pyspark.sql.SparkSession, the class this method belongs to.


The sections below present 15 code examples of the SparkSession.builder method, sorted by popularity by default.
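Before the individual examples, here is a minimal, self-contained sketch of the builder pattern they all share (the app name, master, and config key below are illustrative placeholders, not taken from any example):

from pyspark.sql import SparkSession

# appName(), master(), and config() each return the builder, so the calls chain;
# getOrCreate() returns the active session if one exists, otherwise builds a new one.
spark = SparkSession.builder \
    .master('local[*]') \
    .appName('builder-demo') \
    .config('spark.sql.shuffle.partitions', '4') \
    .getOrCreate()

print(spark.version)
spark.stop()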

Example 1: get_spark_session

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def get_spark_session(enable_hive=False, app_name='marvin-engine', configs=[]):
    """Return a Spark Session object"""

    # Locate the Spark installation so pyspark can be imported
    import findspark
    findspark.init()
    from pyspark.sql import SparkSession

    # Prepare the Spark session to be returned
    spark = SparkSession.builder

    spark = spark.appName(app_name)
    spark = spark.enableHiveSupport() if enable_hive else spark

    # Apply any additional configuration entries
    for config in configs:
        spark = spark.config(config)

    return spark.getOrCreate() 
Author: marvin-ai, Project: marvin-python-toolbox, Lines of code: 21, Source file: data_source_provider.py
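A hypothetical call site for this helper (a sketch only; the values shown are placeholders, and no extra configs are passed because the helper forwards each entry of configs as a single positional argument to Builder.config()):

spark = get_spark_session(enable_hive=True, app_name='my-engine')
spark.sql('SHOW DATABASES').show()
spark.stop()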

Example 2: spark

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def spark(request):
    spark = SparkSession.builder \
        .master('local[*]') \
        .enableHiveSupport() \
        .getOrCreate()

    # Now populate some tables
    for database_name in ['tst_app', 'transaction_a', 'transaction_b']:
        spark.sql('DROP DATABASE IF EXISTS {0} CASCADE'.format(database_name)).collect()
        spark.sql('CREATE DATABASE {0}'.format(database_name))

    populate_transaction_a(spark)
    populate_transaction_b(spark)
    populate_account_info(spark)
    populate_countries(spark)

    return spark 
Author: danielvdende, Project: data-testing-with-airflow, Lines of code: 19, Source file: conftest.py

Example 3: spark

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def spark():
    spark = SparkSession.builder \
        .config('spark.sql.warehouse.dir', '/usr/local/airflow/spark_warehouse') \
        .config('spark.hadoop.javax.jdo.option.ConnectionURL',
                'jdbc:derby:;databaseName=/usr/local/airflow/metastore_db;create=true') \
        .enableHiveSupport() \
        .getOrCreate()

    # Now populate some tables
    for database_name in ['dev_app', 'tst_app', 'acc_app', 'prd_app', 'transaction_a', 'transaction_b']:
        spark.sql('DROP DATABASE IF EXISTS {0} CASCADE'.format(database_name)).collect()
        spark.sql('CREATE DATABASE {0}'.format(database_name)).collect()

    populate_transaction_a(spark)
    populate_transaction_b(spark)

    for environment in ['dev', 'tst', 'acc', 'prd']:
        populate_account_info(spark, environment)
        populate_countries(spark, environment) 
Author: danielvdende, Project: data-testing-with-airflow, Lines of code: 21, Source file: populate_tables.py

Example 4: _test

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def _test():
    import doctest
    import sys  # used by sys.exit() below
    import pyspark.ml.image
    globs = pyspark.ml.image.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.image tests")\
        .getOrCreate()
    globs['spark'] = spark

    (failure_count, test_count) = doctest.testmod(
        pyspark.ml.image, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines of code: 18, Source file: image.py

Example 5: _test

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def _test():
    import doctest
    import sys  # used by sys.exit() below
    import numpy
    from pyspark.sql import SparkSession
    import pyspark.mllib.evaluation
    try:
        # NumPy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        pass
    globs = pyspark.mllib.evaluation.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.evaluation tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines of code: 22, Source file: evaluation.py

Example 6: _test

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def _test():
    import doctest
    import sys  # used by sys.exit() below
    from pyspark.sql import SparkSession
    import pyspark.mllib.fpm
    globs = pyspark.mllib.fpm.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.fpm tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    import tempfile

    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines of code: 27, Source file: fpm.py

Example 7: _test

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def _test():
    import doctest
    import sys  # used by sys.exit() below
    import numpy
    from pyspark.sql import SparkSession
    from pyspark.mllib.linalg import Matrices
    import pyspark.mllib.linalg.distributed
    try:
        # NumPy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        pass
    globs = pyspark.mllib.linalg.distributed.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("mllib.linalg.distributed tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    globs['Matrices'] = Matrices
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines of code: 24, Source file: distributed.py

Example 8: _test

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def _test():
    import doctest
    import sys  # used by sys.exit() below
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.functions
    globs = pyspark.sql.functions.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.functions tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines of code: 21, Source file: functions.py

Example 9: _test

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def _test():
    import doctest
    import sys  # used by sys.exit() below
    from pyspark.sql import SparkSession
    import pyspark.sql.udf
    globs = pyspark.sql.udf.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.udf tests")\
        .getOrCreate()
    globs['spark'] = spark
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.udf, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines of code: 18, Source file: udf.py

Example 10: cast

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def cast(self, dataType):
    """Convert the column into type ``dataType``.

    >>> df.select(df.age.cast("string").alias('ages')).collect()
    [Row(ages=u'2'), Row(ages=u'5')]
    >>> df.select(df.age.cast(StringType()).alias('ages')).collect()
    [Row(ages=u'2'), Row(ages=u'5')]
    """
    if isinstance(dataType, basestring):  # the source module aliases basestring to str on Python 3
        jc = self._jc.cast(dataType)
    elif isinstance(dataType, DataType):
        from pyspark.sql import SparkSession
        spark = SparkSession.builder.getOrCreate()
        jdt = spark._jsparkSession.parseDataType(dataType.json())
        jc = self._jc.cast(jdt)
    else:
        raise TypeError("unexpected type: %s" % type(dataType))
    return Column(jc)
Author: runawayhorse001, Project: LearningApacheSpark, Lines of code: 20, Source file: column.py

Example 11: _test

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def _test():
    import doctest
    import sys  # used by sys.exit() below
    from pyspark.sql import SparkSession
    from pyspark.sql.types import StructType, StructField, IntegerType, StringType  # used by toDF() below
    import pyspark.sql.column
    globs = pyspark.sql.column.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.column tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['spark'] = spark
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))

    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.column, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines of code: 23, Source file: column.py

Example 12: _spark_session

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def _spark_session():
    """Internal fixture for SparkSession instance.

    Yields SparkSession instance if it is supported by the pyspark
    version, otherwise yields None.

    Required to correctly initialize `spark_context` fixture after
    `spark_session` fixture.

    .. note::
        It is not possible to create SparkSession from the existing
        SparkContext.
    """

    try:
        from pyspark.sql import SparkSession
    except ImportError:
        yield
    else:
        session = SparkSession.builder \
            .config(conf=SparkConfigBuilder().get()) \
            .getOrCreate()

        yield session
        session.stop() 
Author: malexer, Project: pytest-spark, Lines of code: 27, Source file: fixtures.py
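For context, a test consuming the public spark_session fixture that pytest-spark layers on top of this internal one might look like the following sketch (the test body is illustrative, not taken from the project):

def test_row_count(spark_session):
    # pytest-spark injects the session created by _spark_session above.
    df = spark_session.createDataFrame([(1, 'a'), (2, 'b')], ['id', 'label'])
    assert df.count() == 2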

Example 13: _load_pyfunc

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def _load_pyfunc(path):
    """
    Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.

    :param path: Local filesystem path to the MLflow Model with the ``spark`` flavor.
    """
    # NOTE: The getOrCreate() call below may change settings of the active session which we do not
    # intend to do here. In particular, setting master to local[1] can break distributed clusters.
    # To avoid this problem, we explicitly check for an active session. This is not ideal but there
    # is no good workaround at the moment.
    import pyspark

    spark = pyspark.sql.SparkSession._instantiatedSession
    if spark is None:
        spark = pyspark.sql.SparkSession.builder.config("spark.python.worker.reuse", True) \
            .master("local[1]").getOrCreate()
    return _PyFuncModelWrapper(spark, _load_model(model_uri=path)) 
Author: mlflow, Project: mlflow, Lines of code: 19, Source file: spark.py

Example 14: create_testing_spark_session

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def create_testing_spark_session(cls):
    return (SparkSession.builder
            .master('local[2]')
            .appName('sparkflow')
            .getOrCreate())
Author: lifeomic, Project: sparkflow, Lines of code: 7, Source file: dl_runner.py

Example 15: spark

# Required import: from pyspark.sql import SparkSession [as alias]
# Or: from pyspark.sql.SparkSession import builder [as alias]
def spark(request):
    """
    Fixture to create the SparkSession.
    """
    spark = SparkSession.builder \
        .appName(APP_NAME) \
        .config('spark.sql.warehouse.dir', '/usr/local/airflow/spark_warehouse') \
        .config('spark.hadoop.javax.jdo.option.ConnectionURL',
                'jdbc:derby:;databaseName=/usr/local/airflow/metastore_db;create=true') \
        .enableHiveSupport() \
        .getOrCreate()

    request.addfinalizer(spark.stop)

    return spark 
Author: danielvdende, Project: data-testing-with-airflow, Lines of code: 17, Source file: conftest.py


Note: The pyspark.sql.SparkSession.builder method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.