

Python conf.SparkConf Usage Examples

This page collects typical usage examples of pyspark.conf.SparkConf in Python. If you are wondering what conf.SparkConf does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples from its home module, pyspark.conf.


Fifteen code examples of conf.SparkConf are shown below, sorted by popularity by default.

Example 1: _create_shell_session

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def _create_shell_session():
        """
        Initialize a SparkSession for a pyspark shell session. This is called from shell.py
        to make error handling simpler without needing to declare local variables in that
        script, which would expose those to users.
        """
        import py4j
        import warnings
        from pyspark.conf import SparkConf
        from pyspark.context import SparkContext
        # SparkSession is already in scope: this helper lives in pyspark/sql/session.py.
        try:
            # Try to access HiveConf, it will raise exception if Hive is not added
            conf = SparkConf()
            if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
                SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
                return SparkSession.builder\
                    .enableHiveSupport()\
                    .getOrCreate()
            else:
                return SparkSession.builder.getOrCreate()
        except (py4j.protocol.Py4JError, TypeError):
            if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
                warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                              "please make sure you build spark with hive")

        return SparkSession.builder.getOrCreate() 
Author: pingcap, Project: tidb-docker-compose, Lines: 27, Source: session.py
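The same catalog check can be used in application code, outside the shell bootstrap. A minimal sketch, assuming a standard PySpark install (with Hive classes on the classpath when Hive support is requested):

from pyspark.conf import SparkConf
from pyspark.sql import SparkSession

conf = SparkConf()
# Mirror the shell's default: treat an unset catalog implementation as 'hive'.
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
    spark = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
    spark = SparkSession.builder.getOrCreate()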

Example 2: test_user_configuration

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def test_user_configuration(self):
        """Make sure user configuration is respected (SPARK-19307)"""
        script = self.createTempFile("test.py", """
            |from pyspark import SparkConf, SparkContext
            |
            |conf = SparkConf().set("spark.test_config", "1")
            |sc = SparkContext(conf = conf)
            |try:
            |    if sc._conf.get("spark.test_config") != "1":
            |        raise Exception("Cannot find spark.test_config in SparkContext's conf.")
            |finally:
            |    sc.stop()
            """)
        proc = subprocess.Popen(
            self.sparkSubmit + ["--master", "local", script],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode, msg="Process failed with error:\n {0}".format(out)) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines: 21, Source: tests.py
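The configuration pattern the test drives through spark-submit can also be run directly. A minimal sketch, assuming a local PySpark installation:

from pyspark import SparkConf, SparkContext

conf = SparkConf().set("spark.test_config", "1")
sc = SparkContext(master="local", conf=conf)
try:
    # User-supplied settings survive into the live context (SPARK-19307).
    assert sc.getConf().get("spark.test_config") == "1"
finally:
    sc.stop()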

Example 3: main

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
import sys

from py4j.java_gateway import java_import
from pyspark import SparkConf, SparkContext


def main():
    if len(sys.argv) != 3:
        print("Usage: example <keyspace_name> <column_family_name>", file=sys.stderr)
        sys.exit(-1)

    keyspace_name = sys.argv[1]
    column_family_name = sys.argv[2]

    # Valid config options here https://github.com/datastax/spark-cassandra-connector/blob/master/doc/1_connecting.md
    conf = SparkConf().set("spark.cassandra.connection.host", "127.0.0.1")

    sc = SparkContext(appName="Spark + Cassandra Example",
                      conf=conf)

    # import time; time.sleep(30)
    java_import(sc._gateway.jvm, "com.datastax.spark.connector.CassandraJavaUtil")
    print(sc._jvm.CassandraJavaUtil)

    users = (
        ["Mike", "Sukmanowsky"],
        ["Andrew", "Montalenti"],
        ["Keith", "Bourgoin"],
    )
    rdd = sc.parallelize(users)
    print(rdd.collect())
Author: Parsely, Project: pyspark-cassandra, Lines: 27, Source: cassandra_example.py
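For the example above to find the connector classes, the connector jar must be on the driver and executor classpaths. One option is to let Spark resolve it at launch via spark.jars.packages; the Maven coordinate below is an assumption and must match your Spark and connector versions:

from pyspark import SparkConf, SparkContext

conf = (SparkConf()
        .set("spark.cassandra.connection.host", "127.0.0.1")
        # Hypothetical coordinate; pick the artifact matching your Spark/Scala version.
        .set("spark.jars.packages",
             "com.datastax.spark:spark-cassandra-connector_2.12:3.4.1"))
sc = SparkContext(appName="Spark + Cassandra Example", conf=conf)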

Example 4: config

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def config(self, key=None, value=None, conf=None):
            """Sets a config option. Options set using this method are automatically propagated to
            both :class:`SparkConf` and :class:`SparkSession`'s own configuration.

            For an existing SparkConf, use `conf` parameter.

            >>> from pyspark.conf import SparkConf
            >>> SparkSession.builder.config(conf=SparkConf())
            <pyspark.sql.session...

            For a (key, value) pair, you can omit parameter names.

            >>> SparkSession.builder.config("spark.some.config.option", "some-value")
            <pyspark.sql.session...

            :param key: a key name string for configuration property
            :param value: a value for configuration property
            :param conf: an instance of :class:`SparkConf`
            """
            with self._lock:
                if conf is None:
                    self._options[key] = str(value)
                else:
                    for (k, v) in conf.getAll():
                        self._options[k] = v
                return self 
Author: pingcap, Project: tidb-docker-compose, Lines: 28, Source: session.py
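Both call forms described in the docstring compose on the same builder. A short sketch, assuming pyspark is importable:

from pyspark.conf import SparkConf
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .config("spark.some.config.option", "some-value")        # (key, value) form
         .config(conf=SparkConf().set("spark.app.name", "demo"))  # existing-SparkConf form
         .getOrCreate())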

Example 5: init_spark_session

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def init_spark_session(self, application_name, spark_master=None):
        """Set up a spark session.

        :param application_name: Name to show for this application in the Spark UI.
        :param spark_master: A master parameter used by the spark session builder.
          Use the default value (None) to use the spark cluster configured in the
          system environment.
          Use 'local[*]' to run on a local box.

        :return: spark_session: A spark session
        """
        eva_spark_conf = SparkConf()
        eva_spark_conf.set('spark.logConf', 'true')

        session_builder = SparkSession \
            .builder \
            .appName(application_name) \
            .config(conf=eva_spark_conf)

        if spark_master:
            session_builder.master(spark_master)

        # Gets an existing SparkSession or,
        # if there is no existing one, creates a new one based
        # on the options set in this builder.
        self._session = session_builder.getOrCreate()

        # Configure logging
        log4j_level = LoggingManager().getLog4JLevel()
        spark_context = self._session.sparkContext
        spark_context.setLogLevel(log4j_level) 
Author: georgia-tech-db, Project: eva, Lines: 32, Source: session.py
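Outside the enclosing class (not shown in this excerpt), the same builder logic looks like this. A standalone sketch; the application name and log level are placeholders:

from pyspark.conf import SparkConf
from pyspark.sql import SparkSession

conf = SparkConf().set('spark.logConf', 'true')
spark = (SparkSession.builder
         .appName('eva-session-sketch')   # placeholder name
         .master('local[*]')              # omit to use the environment's cluster
         .config(conf=conf)
         .getOrCreate())
spark.sparkContext.setLogLevel('INFO')    # placeholder level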

Example 6: test_external_sort_in_rdd

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def test_external_sort_in_rdd(self):
        conf = SparkConf().set("spark.python.worker.memory", "1m")
        sc = SparkContext(conf=conf)
        l = list(range(10240))
        random.shuffle(l)
        rdd = sc.parallelize(l, 4)
        self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
        sc.stop() 
Author: runawayhorse001, Project: LearningApacheSpark, Lines: 10, Source: tests.py

Example 7: conf

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def conf(cls):
        """
        Override this in subclasses to supply a more specific conf
        """
        return SparkConf() 
Author: runawayhorse001, Project: LearningApacheSpark, Lines: 7, Source: tests.py
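A subclass would override conf() to inject test-specific settings before the shared SparkContext is created. A hypothetical sketch (the class name and config key are illustrative; the Arrow key name varies across Spark versions):

class ArrowTests(ReusedPySparkTestCase):   # hypothetical subclass
    @classmethod
    def conf(cls):
        return SparkConf().set("spark.sql.execution.arrow.enabled", "true")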

Example 8: setUp

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def setUp(self):
        self._old_sys_path = list(sys.path)
        class_name = self.__class__.__name__
        conf = SparkConf().set("spark.python.profile", "true")
        self.sc = SparkContext('local[4]', class_name, conf=conf) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines: 7, Source: tests.py

Example 9: test_profiler_disabled

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def test_profiler_disabled(self):
        sc = SparkContext(conf=SparkConf().set("spark.python.profile", "false"))
        try:
            self.assertRaisesRegexp(
                RuntimeError,
                "'spark.python.profile' configuration must be set",
                lambda: sc.show_profiles())
            self.assertRaisesRegexp(
                RuntimeError,
                "'spark.python.profile' configuration must be set",
                lambda: sc.dump_profiles("/tmp/abc"))
        finally:
            sc.stop() 
Author: runawayhorse001, Project: LearningApacheSpark, Lines: 15, Source: tests.py
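The enabled counterpart: with spark.python.profile set to "true", profiles accumulate per RDD and can be printed after a job runs. A sketch:

from pyspark import SparkConf, SparkContext

sc = SparkContext(conf=SparkConf().set("spark.python.profile", "true"))
try:
    sc.parallelize(range(100)).map(lambda x: x * x).count()
    sc.show_profiles()   # prints cProfile stats for the job above
finally:
    sc.stop()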

Example 10: getOrCreate

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def getOrCreate(cls, conf=None):
        """
        Get or instantiate a SparkContext and register it as a singleton object.

        :param conf: SparkConf (optional)
        """
        with SparkContext._lock:
            if SparkContext._active_spark_context is None:
                SparkContext(conf=conf or SparkConf())
            return SparkContext._active_spark_context 
Author: runawayhorse001, Project: LearningApacheSpark, Lines: 12, Source: context.py
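Caller-side, the singleton behavior means repeated calls hand back the same context. A sketch:

from pyspark import SparkConf, SparkContext

sc1 = SparkContext.getOrCreate(SparkConf().setAppName("singleton-demo"))
sc2 = SparkContext.getOrCreate()   # no new context; returns the active one
assert sc1 is sc2
sc1.stop()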

Example 11: getConf

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def getConf(self):
        conf = SparkConf()
        conf.setAll(self._conf.getAll())
        return conf 
Author: runawayhorse001, Project: LearningApacheSpark, Lines: 6, Source: context.py
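Because getConf() copies the settings into a fresh SparkConf, mutating the returned object does not touch the running context. A sketch, assuming an active SparkContext named sc; the key is hypothetical:

snapshot = sc.getConf()
snapshot.set("spark.hypothetical.key", "x")   # affects only the local copy
assert sc.getConf().get("spark.hypothetical.key", None) is None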

Example 12: _test_multiple_broadcasts

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def _test_multiple_broadcasts(self, *extra_confs):
        """
        Test broadcast variables make it OK to the executors.  Tests multiple broadcast variables,
        and also multiple jobs.
        """
        conf = SparkConf()
        for key, value in extra_confs:
            conf.set(key, value)
        conf.setMaster("local-cluster[2,1,1024]")
        self.sc = SparkContext(conf=conf)
        self._test_encryption_helper([5])
        self._test_encryption_helper([5, 10, 20]) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines: 14, Source: test_broadcast.py
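A self-contained version of the same pattern, folding extra (key, value) pairs into a SparkConf before starting a local cluster. spark.io.encryption.enabled is a real Spark setting; treating it as the extra conf these broadcast tests pass in is an assumption:

from pyspark import SparkConf, SparkContext

extra_confs = [("spark.io.encryption.enabled", "true")]
conf = SparkConf().setMaster("local-cluster[2,1,1024]")
for key, value in extra_confs:
    conf.set(key, value)
sc = SparkContext(conf=conf)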

Example 13: setUpClass

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def setUpClass(cls):
        gateway = launch_gateway(SparkConf())
        cls._jvm = gateway.jvm
        cls.longMessage = True
        random.seed(42) 
Author: runawayhorse001, Project: LearningApacheSpark, Lines: 7, Source: test_broadcast.py

Example 14: registerFunction

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def registerFunction(self, ssc, jsession, function_name, params):
        jvm = self.gateway.jvm
        # If we don't have a reference to a running SparkContext
        # Get the SparkContext from the provided SparkSession.
        if not self._sc:
            master = ssc.master()
            jsc = jvm.org.apache.spark.api.java.JavaSparkContext(ssc)
            jsparkConf = ssc.conf()
            sparkConf = SparkConf(_jconf=jsparkConf)
            self._sc = SparkContext(
                master=master,
                conf=sparkConf,
                gateway=self.gateway,
                jsc=jsc)
            self._session = SparkSession.builder.getOrCreate()
        if function_name in functions_info:
            function_info = functions_info[function_name]
            if params:
                evaledParams = ast.literal_eval(params)
            else:
                evaledParams = []
            func = function_info.func(*evaledParams)
            ret_type = function_info.returnType()
            self._count = self._count + 1
            registration_name = function_name + str(self._count)
            udf = UserDefinedFunction(func, ret_type, registration_name)
            # Used to allow non-default (e.g. Arrow) UDFs
            udf.evalType = function_info.evalType()
            judf = udf._judf
            return judf
        else:
            print("Could not find function")
            # We do this rather than raising an exception since Py4J debugging
            # is rough and we can check it.
            return None 
Author: sparklingpandas, Project: sparklingml, Lines: 37, Source: startup.py

Example 15: spark_jvm_imports

# Required module import: from pyspark import conf [as alias]
# Or: from pyspark.conf import SparkConf [as alias]
def spark_jvm_imports(jvm):
        # Import the classes used by PySpark
        java_import(jvm, "org.apache.spark.SparkConf")
        java_import(jvm, "org.apache.spark.api.java.*")
        java_import(jvm, "org.apache.spark.api.python.*")
        java_import(jvm, "org.apache.spark.ml.python.*")
        java_import(jvm, "org.apache.spark.mllib.api.python.*")
        # TODO(davies): move into sql
        java_import(jvm, "org.apache.spark.sql.*")
        java_import(jvm, "org.apache.spark.sql.hive.*")
        java_import(jvm, "scala.Tuple2") 
Author: sparklingpandas, Project: sparklingml, Lines: 13, Source: startup.py
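Once the imports are registered, short class names resolve directly on the JVM view. A sketch, assuming a local Spark install so launch_gateway can start the JVM:

from pyspark.conf import SparkConf
from pyspark.java_gateway import launch_gateway

gateway = launch_gateway(SparkConf())
spark_jvm_imports(gateway.jvm)
jconf = gateway.jvm.SparkConf()   # resolvable by short name after java_import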


Note: The pyspark.conf.SparkConf examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution or use must follow each project's license. Please do not reproduce this page without permission.