

Python SparkContext.getOrCreate Method Code Examples

This article collects typical usage examples of the pyspark.context.SparkContext.getOrCreate method in Python. If you are wondering how SparkContext.getOrCreate is used in practice, or looking for concrete examples of calling it, the curated snippets below should help. You can also explore further usage examples of the containing class, pyspark.context.SparkContext.


Twelve code examples of the SparkContext.getOrCreate method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
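Before the collected examples, here is a minimal sketch of the two entry points the snippets below revolve around: SparkContext.getOrCreate() for the low-level context and SparkSession.builder.getOrCreate() for the SQL session built on top of it. The app name in the sketch is purely illustrative.

from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SparkSession

# Reuse a running SparkContext if one exists; otherwise create one
# from the supplied configuration (the app name is hypothetical).
conf = SparkConf().setAppName("getOrCreate-demo")
sc = SparkContext.getOrCreate(conf)

# The session builder wraps the same context; repeated calls return
# the existing objects instead of starting new ones.
spark = SparkSession.builder.getOrCreate()
assert SparkContext.getOrCreate() is sc

spark.stop()  # also stops the underlying SparkContext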

Example 1: _create_shell_session

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def _create_shell_session():
        """
        Initialize a SparkSession for a pyspark shell session. This is called from shell.py
        to make error handling simpler without needing to declare local variables in that
        script, which would expose those to users.
        """
        import warnings  # needed for the fallback warning below

        import py4j
        from pyspark.conf import SparkConf
        from pyspark.context import SparkContext
        try:
            # Try to access HiveConf, it will raise exception if Hive is not added
            conf = SparkConf()
            if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
                SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
                return SparkSession.builder\
                    .enableHiveSupport()\
                    .getOrCreate()
            else:
                return SparkSession.builder.getOrCreate()
        except (py4j.protocol.Py4JError, TypeError):
            if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
                warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                              "please make sure you build spark with hive")

        return SparkSession.builder.getOrCreate() 
Developer ID: pingcap, Project: tidb-docker-compose, Lines of code: 27, Source file: session.py

Example 2: __init__

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def __init__(self, sparkContext, jsparkSession=None):
        """Creates a new SparkSession.

        >>> from datetime import datetime
        >>> spark = SparkSession(sc)
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
        ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ...     time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> df = allTypes.toDF()
        >>> df.createOrReplaceTempView("allTypes")
        >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ...            'from allTypes where b and i > 0').collect()
        [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
            dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
        >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        from pyspark.sql.context import SQLContext
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        if jsparkSession is None:
            if self._jvm.SparkSession.getDefaultSession().isDefined() \
                    and not self._jvm.SparkSession.getDefaultSession().get() \
                        .sparkContext().isStopped():
                jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
            else:
                jsparkSession = self._jvm.SparkSession.builder().getOrCreate()
                # jsparkSession = self._jvm.SparkSession(self._jsc.sc())
        self._jsparkSession = jsparkSession
        self._jwrapped = self._jsparkSession.sqlContext()
        self._wrapped = SQLContext(self._sc, self, self._jwrapped)
        _monkey_patch_RDD(self)
        install_exception_handler()
        # If we had an instantiated SparkSession attached with a SparkContext
        # which is stopped now, we need to renew the instantiated SparkSession.
        # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
        if SparkSession._instantiatedSession is None \
                or SparkSession._instantiatedSession._sc._jsc is None:
            SparkSession._instantiatedSession = self
            self._jvm.SparkSession.setDefaultSession(self._jsparkSession) 
Developer ID: pingcap, Project: tidb-docker-compose, Lines of code: 43, Source file: session.py

Example 3: __enter__

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def __enter__(self):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
        """
        return self 
Developer ID: pingcap, Project: tidb-docker-compose, Lines of code: 7, Source file: session.py

Example 4: __exit__

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

        Specifically stop the SparkSession on exit of the with block.
        """
        self.stop() 
Developer ID: pingcap, Project: tidb-docker-compose, Lines of code: 9, Source file: session.py

Example 5: __init__

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def __init__(self, sparkContext, jsparkSession=None):
        """Creates a new SparkSession.

        >>> from datetime import datetime
        >>> spark = SparkSession(sc)
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
        ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ...     time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> df = allTypes.toDF()
        >>> df.createOrReplaceTempView("allTypes")
        >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ...            'from allTypes where b and i > 0').collect()
        [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
            dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
        >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        from pyspark.sql.context import SQLContext
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        if jsparkSession is None:
            jsparkSession = self._jvm.SparkSession.builder().getOrCreate()
        self._jsparkSession = jsparkSession
        self._jwrapped = self._jsparkSession.sqlContext()
        self._wrapped = SQLContext(self._sc, self, self._jwrapped)
        _monkey_patch_RDD(self)
        install_exception_handler()
        # If we had an instantiated SparkSession attached with a SparkContext
        # which is stopped now, we need to renew the instantiated SparkSession.
        # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
        if SparkSession._instantiatedSession is None \
                or SparkSession._instantiatedSession._sc._jsc is None:
            SparkSession._instantiatedSession = self 
Developer ID: pingcap, Project: tidb-docker-compose, Lines of code: 36, Source file: session.py

Example 6: test_get_or_create

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def test_get_or_create(self):
        with SparkContext.getOrCreate() as sc:
            self.assertTrue(SparkContext.getOrCreate() is sc) 
Developer ID: runawayhorse001, Project: LearningApacheSpark, Lines of code: 5, Source file: tests.py

Example 7: __init__

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def __init__(self, sparkContext, jsparkSession=None):
        """Creates a new SparkSession.

        >>> from datetime import datetime
        >>> spark = SparkSession(sc)
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
        ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ...     time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> df = allTypes.toDF()
        >>> df.createOrReplaceTempView("allTypes")
        >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ...            'from allTypes where b and i > 0').collect()
        [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
            dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
        >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        from pyspark.sql.context import SQLContext
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        if jsparkSession is None:
            if self._jvm.SparkSession.getDefaultSession().isDefined() \
                    and not self._jvm.SparkSession.getDefaultSession().get() \
                        .sparkContext().isStopped():
                jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
            else:
                jsparkSession = self._jvm.SparkSession(self._jsc.sc())
        self._jsparkSession = jsparkSession
        self._jwrapped = self._jsparkSession.sqlContext()
        self._wrapped = SQLContext(self._sc, self, self._jwrapped)
        _monkey_patch_RDD(self)
        install_exception_handler()
        # If we had an instantiated SparkSession attached with a SparkContext
        # which is stopped now, we need to renew the instantiated SparkSession.
        # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
        if SparkSession._instantiatedSession is None \
                or SparkSession._instantiatedSession._sc._jsc is None:
            SparkSession._instantiatedSession = self
            self._jvm.SparkSession.setDefaultSession(self._jsparkSession) 
Developer ID: runawayhorse001, Project: LearningApacheSpark, Lines of code: 42, Source file: session.py

Example 8: broadcast

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def broadcast(self):
        """Broadcast self to ensure we are shared."""
        if self._broadcast is None:
            from pyspark.context import SparkContext
            sc = SparkContext.getOrCreate()
            try:
                SpacyMagic.__lock.acquire()
                self.__empty_please = True
                self._broadcast = sc.broadcast(self)
                self.__empty_please = False
            finally:
                SpacyMagic.__lock.release()
        return self._broadcast 
Developer ID: sparklingpandas, Project: sparklingml, Lines of code: 15, Source file: transformation_functions.py
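For context, the lazy-broadcast pattern above amounts to caching the result of SparkContext.broadcast so the object is shipped to executors only once. A simplified sketch of the same idea, without the SpacyMagic-specific locking and pickling flags (the class and attribute names here are illustrative):

from pyspark.context import SparkContext

class SharedResource(object):
    """Toy holder that broadcasts itself at most once (illustrative)."""

    def __init__(self):
        self._broadcast = None

    def broadcast(self):
        if self._broadcast is None:
            sc = SparkContext.getOrCreate()
            # self._broadcast is still None here, so the pickled copy sent
            # to executors does not drag the broadcast handle along.
            self._broadcast = sc.broadcast(self)
        return self._broadcast

# Executors read the shared object through .value on the broadcast handle:
#   shared = SharedResource().broadcast()
#   rdd.map(lambda x: use(shared.value, x))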

Example 9: _init_glue_context

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def _init_glue_context():
        # Imports are done here so we can isolate the configuration of this job
        from awsglue.context import GlueContext
        from pyspark.context import SparkContext
        spark_context = SparkContext.getOrCreate()
        spark_context._jsc.hadoopConfiguration().set("mapreduce.fileoutputcommitter.marksuccessfuljobs", "false")  # noqa pylint: disable=protected-access
        spark_context._jsc.hadoopConfiguration().set("parquet.enable.summary-metadata", "false")  # noqa pylint: disable=protected-access
        return GlueContext(spark_context) 
Developer ID: awslabs, Project: athena-glue-service-logs, Lines of code: 10, Source file: job.py
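A hypothetical call site for the helper above, as it might appear in an AWS Glue job script; the catalog database and table names are placeholders.

# Illustrative only: assumes the _init_glue_context helper above is in scope.
glue_context = _init_glue_context()
spark = glue_context.spark_session

dynamic_frame = glue_context.create_dynamic_frame.from_catalog(
    database="example_db",       # hypothetical Glue catalog database
    table_name="example_table")  # hypothetical Glue catalog table
print(dynamic_frame.count())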

Example 10: _fit

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def _fit(self, dataset):
    """Trains a TensorFlow model and returns a TFModel instance with the same args/params pointing to a checkpoint or saved_model on disk.

    Args:
      :dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors.

    Returns:
      A TFModel representing the trained model, backed on disk by a TensorFlow checkpoint or saved_model.
    """
    sc = SparkContext.getOrCreate()

    logger.info("===== 1. train args: {0}".format(self.args))
    logger.info("===== 2. train params: {0}".format(self._paramMap))
    local_args = self.merge_args_params()
    logger.info("===== 3. train args + params: {0}".format(local_args))

    tf_args = self.args.argv if self.args.argv else local_args
    cluster = TFCluster.run(sc, self.train_fn, tf_args, local_args.cluster_size, local_args.num_ps,
                            local_args.tensorboard, TFCluster.InputMode.SPARK, master_node=local_args.master_node, driver_ps_nodes=local_args.driver_ps_nodes)
    # feed data, using a deterministic order for input columns (lexicographic by key)
    input_cols = sorted(self.getInputMapping())
    cluster.train(dataset.select(input_cols).rdd, local_args.epochs)
    cluster.shutdown(grace_secs=self.getGraceSecs())

    if self.export_fn:
      if version.parse(TF_VERSION) < version.parse("2.0.0"):
        # For TF1.x, run export function, if provided
        assert local_args.export_dir, "Export function requires --export_dir to be set"
        logging.info("Exporting saved_model (via export_fn) to: {}".format(local_args.export_dir))

        def _export(iterator, fn, args):
          single_node_env(args)
          fn(args)

        # Run on a single executor
        sc.parallelize([1], 1).foreachPartition(lambda it: _export(it, self.export_fn, tf_args))
      else:
        # for TF2.x
        raise Exception("Please use native TF2.x APIs to export a saved_model.")

    return self._copyValues(TFModel(self.args)) 
Developer ID: yahoo, Project: TensorFlowOnSpark, Lines of code: 43, Source file: pipeline.py

Example 11: _transform

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def _transform(self, dataset):
    """Transforms the input DataFrame by applying the _run_model() mapPartitions function.

    Args:
      :dataset: A Spark DataFrame for TensorFlow inferencing.
    """
    spark = SparkSession.builder.getOrCreate()

    # set a deterministic order for input/output columns (lexicographic by key)
    input_cols = [col for col, tensor in sorted(self.getInputMapping().items())]      # input col => input tensor
    output_cols = [col for tensor, col in sorted(self.getOutputMapping().items())]    # output tensor => output col

    # run single-node inferencing on each executor
    logger.info("input_cols: {}".format(input_cols))
    logger.info("output_cols: {}".format(output_cols))

    # merge args + params
    logger.info("===== 1. inference args: {0}".format(self.args))
    logger.info("===== 2. inference params: {0}".format(self._paramMap))
    local_args = self.merge_args_params()
    logger.info("===== 3. inference args + params: {0}".format(local_args))

    tf_args = self.args.argv if self.args.argv else local_args

    _run_model = _run_model_tf1 if version.parse(TF_VERSION) < version.parse("2.0.0") else _run_model_tf2
    rdd_out = dataset.select(input_cols).rdd.mapPartitions(lambda it: _run_model(it, local_args, tf_args))

    # convert to a DataFrame-friendly format
    rows_out = rdd_out.map(lambda x: Row(*x))
    return spark.createDataFrame(rows_out, output_cols)


# global on each python worker process on the executors 
Developer ID: yahoo, Project: TensorFlowOnSpark, Lines of code: 35, Source file: pipeline.py

Example 12: getOrCreate

# Required import: from pyspark.context import SparkContext [as alias]
# Or: from pyspark.context.SparkContext import getOrCreate [as alias]
def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.

            This method first checks whether there is a valid global default SparkSession, and if
            yes, return that one. If no valid global default SparkSession exists, the method
            creates a new SparkSession and assigns the newly created SparkSession as the global
            default.

            >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
            >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
            True

            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.

            >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
            >>> s1.conf.get("k1") == s2.conf.get("k1")
            True
            >>> s1.conf.get("k2") == s2.conf.get("k2")
            True
            """
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf
                session = SparkSession._instantiatedSession
                if session is None or session._sc._jsc is None:
                    sparkConf = SparkConf()
                    for key, value in self._options.items():
                        sparkConf.set(key, value)
                    sc = SparkContext.getOrCreate(sparkConf)
                    # This SparkContext may be an existing one.
                    for key, value in self._options.items():
                        # we need to propagate the confs
                        # before we create the SparkSession. Otherwise, confs like
                        # warehouse path and metastore url will not be set correctly (
                        # these confs cannot be changed once the SparkSession is created).
                        sc._conf.set(key, value)
                    session = SparkSession(sc)
                for key, value in self._options.items():
                    session._jsparkSession.sessionState().conf().setConfString(key, value)
                for key, value in self._options.items():
                    session.sparkContext._conf.set(key, value)
                return session 
Developer ID: pingcap, Project: tidb-docker-compose, Lines of code: 46, Source file: session.py
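To round out the builder example, a short end-to-end sketch of the public API it implements; the app name and config key are illustrative.

from pyspark.sql import SparkSession

# The first call creates the session; later calls return the same session
# and apply any newly supplied options to it.
spark = (SparkSession.builder
         .appName("builder-demo")
         .config("spark.sql.shuffle.partitions", "4")
         .getOrCreate())

same = SparkSession.builder.getOrCreate()
assert same is spark

spark.stop()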


Note: The pyspark.context.SparkContext.getOrCreate examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Distribution and use should follow the license of the corresponding project; do not reproduce without permission.