This article collects typical usage examples of Python's pyspark.sql.SparkSession class. If you are wondering what sql.SparkSession does, how to use it, or what working examples look like, the curated code samples below may help. You can also explore the pyspark.sql module that the class belongs to for further examples.
Below are 15 code examples of sql.SparkSession, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _init
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def _init(self, app_name='omniduct', config=None, master=None, enable_hive_support=False):
    """
    Args:
        app_name (str): The application name of the SparkSession.
        config (dict or None): Any additional configuration to pass through
            to the SparkSession builder.
        master (str): The Spark master URL to connect to (only necessary
            if environment-specified configuration is missing).
        enable_hive_support (bool): Whether to enable Hive support for the
            Spark session.

    Note: pyspark must be installed in order to use this backend.
    """
    self.app_name = app_name
    self.config = config or {}
    self.master = master
    self.enable_hive_support = enable_hive_support
    self._spark_session = None

# Connection management
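
This snippet only stores connection attributes; the session itself is created lazily. A minimal sketch of what that deferred step might look like, assuming a hypothetical `_connect` method (the name and wiring are illustrative, not taken from omniduct's actual code):

from pyspark.sql import SparkSession

def _connect(self):
    # Hypothetical companion to _init above: build the session on first use.
    if self._spark_session is None:
        builder = SparkSession.builder.appName(self.app_name)
        if self.master:
            builder = builder.master(self.master)
        for key, value in self.config.items():
            builder = builder.config(key, value)
        if self.enable_hive_support:
            builder = builder.enableHiveSupport()
        self._spark_session = builder.getOrCreate()
    return self._spark_session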
Example 2: _main
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def _main(sys_argv):
    logging.basicConfig()
    args = args_parser().parse_args(sys_argv)

    # We set spark.sql.files.maxPartitionBytes to a large value since we typically have a small
    # number of rows per rowgroup. Reading a parquet store with default settings would result in
    # an excessively large number of partitions and inefficient processing.
    spark = configure_spark(SparkSession.builder.appName('petastorm-copy'), args) \
        .config('spark.sql.files.maxPartitionBytes', '1010612736') \
        .getOrCreate()

    copy_dataset(spark, args.source_url, args.target_url, args.field_regex, args.not_null_fields,
                 args.overwrite_output, args.partition_count, args.row_group_size_mb,
                 hdfs_driver=args.hdfs_driver)

    spark.stop()
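
The same partition-size tuning works without petastorm's `configure_spark` helper; a minimal standalone sketch (the application name and paths are placeholders):

from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .appName('parquet-copy-sketch')
         .config('spark.sql.files.maxPartitionBytes', '1010612736')  # ~1 GB per input split
         .getOrCreate())
df = spark.read.parquet('/path/to/source')   # placeholder input path
df.write.parquet('/path/to/target')          # placeholder output path
spark.stop()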
Example 3: schema
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def schema(self, schema):
    """Specifies the input schema.

    Some data sources (e.g. JSON) can infer the input schema automatically from data.
    By specifying the schema here, the underlying data source can skip the schema
    inference step, and thus speed up data loading.

    .. note:: Evolving.

    :param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
        (for example ``col0 INT, col1 DOUBLE``).

    >>> s = spark.readStream.schema(sdf_schema)
    >>> s = spark.readStream.schema("col0 INT, col1 DOUBLE")
    """
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    if isinstance(schema, StructType):
        jschema = spark._jsparkSession.parseDataType(schema.json())
        self._jreader = self._jreader.schema(jschema)
    elif isinstance(schema, basestring):
        self._jreader = self._jreader.schema(schema)
    else:
        raise TypeError("schema should be StructType or string")
    return self
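
As a standalone illustration of the two schema forms the method accepts, a short sketch (the column names are arbitrary):

from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType

spark = SparkSession.builder.getOrCreate()

# A StructType object and its equivalent DDL-formatted string.
struct_schema = StructType([
    StructField("col0", IntegerType()),
    StructField("col1", DoubleType()),
])
reader = spark.readStream.schema(struct_schema)             # object form
reader = spark.readStream.schema("col0 INT, col1 DOUBLE")   # DDL-string form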
Example 4: registerTempTable
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def registerTempTable(self, name):
    """Registers this DataFrame as a temporary table using the given name.

    The lifetime of this temporary table is tied to the :class:`SparkSession`
    that was used to create this :class:`DataFrame`.

    >>> df.registerTempTable("people")
    >>> df2 = spark.sql("select * from people")
    >>> sorted(df.collect()) == sorted(df2.collect())
    True
    >>> spark.catalog.dropTempView("people")

    .. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
    """
    warnings.warn(
        "Deprecated in 2.0, use createOrReplaceTempView instead.", DeprecationWarning)
    self._jdf.createOrReplaceTempView(name)
Example 5: createTempView
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def createTempView(self, name):
    """Creates a local temporary view with this DataFrame.

    The lifetime of this temporary view is tied to the :class:`SparkSession`
    that was used to create this :class:`DataFrame`.
    Throws a :class:`TempTableAlreadyExistsException` if the view name already
    exists in the catalog.

    >>> df.createTempView("people")
    >>> df2 = spark.sql("select * from people")
    >>> sorted(df.collect()) == sorted(df2.collect())
    True
    >>> df.createTempView("people")  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    AnalysisException: u"Temporary table 'people' already exists;"
    >>> spark.catalog.dropTempView("people")
    """
    self._jdf.createTempView(name)
Example 6: createOrReplaceTempView
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def createOrReplaceTempView(self, name):
    """Creates or replaces a local temporary view with this DataFrame.

    The lifetime of this temporary view is tied to the :class:`SparkSession`
    that was used to create this :class:`DataFrame`.

    >>> df.createOrReplaceTempView("people")
    >>> df2 = df.filter(df.age > 3)
    >>> df2.createOrReplaceTempView("people")
    >>> df3 = spark.sql("select * from people")
    >>> sorted(df3.collect()) == sorted(df2.collect())
    True
    >>> spark.catalog.dropTempView("people")
    """
    self._jdf.createOrReplaceTempView(name)
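
A self-contained run of the temp-view lifecycle from Examples 4–6 (the toy DataFrame is invented for illustration):

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("tempview-demo").getOrCreate()
df = spark.createDataFrame([(1, "Alice", 2), (2, "Bob", 5)], ["id", "name", "age"])

df.createOrReplaceTempView("people")        # create (or overwrite) the view
adults = df.filter(df.age > 3)
adults.createOrReplaceTempView("people")    # replace it with the filtered frame
result = spark.sql("SELECT * FROM people")
assert sorted(result.collect()) == sorted(adults.collect())
spark.catalog.dropTempView("people")        # otherwise it lives as long as the session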
Example 7: schema
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def schema(self, schema):
    """Specifies the input schema.

    Some data sources (e.g. JSON) can infer the input schema automatically from data.
    By specifying the schema here, the underlying data source can skip the schema
    inference step, and thus speed up data loading.

    :param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
        (for example ``col0 INT, col1 DOUBLE``).

    >>> s = spark.read.schema("col0 INT, col1 DOUBLE")
    """
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    if isinstance(schema, StructType):
        jschema = spark._jsparkSession.parseDataType(schema.json())
        self._jreader = self._jreader.schema(jschema)
    elif isinstance(schema, basestring):
        self._jreader = self._jreader.schema(schema)
    else:
        raise TypeError("schema should be StructType or string")
    return self
Example 8: sparkSession
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def sparkSession(cls):
    if not hasattr(cls, "spark"):
        # We can't use the SparkSession builder here, since we need to call the
        # Scala side's SmvTestHive.createContext to create the HiveTestContext's
        # SparkSession. So we need to:
        #   * create a java_gateway
        #   * create a SparkConf using the jgw (since without it SparkContext will ignore the given conf)
        #   * create a python SparkContext using the SparkConf (so we can specify the warehouse.dir)
        #   * create the Scala-side HiveTestContext SparkSession
        #   * create the python SparkSession
        jgw = launch_gateway(None)
        jvm = jgw.jvm
        import tempfile
        import getpass
        hivedir = "file://{0}/{1}/smv_hive_test".format(tempfile.gettempdir(), getpass.getuser())
        sConf = SparkConf(False, _jvm=jvm).set("spark.sql.test", "")\
            .set("spark.sql.hive.metastore.barrierPrefixes",
                 "org.apache.spark.sql.hive.execution.PairSerDe")\
            .set("spark.sql.warehouse.dir", hivedir)\
            .set("spark.ui.enabled", "false")
        sc = SparkContext(master="local[1]", appName="SMV Python Test",
                          conf=sConf, gateway=jgw).getOrCreate()
        jss = sc._jvm.org.apache.spark.sql.hive.test.SmvTestHive.createContext(sc._jsc.sc())
        cls.spark = SparkSession(sc, jss.sparkSession())
    return cls.spark
Example 9: _concat_ids
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def _concat_ids(spark, dataset, columnNames):
    """Concatenates the structureId and chainId fields into a single key if the
    chainId field is present.

    Parameters
    ----------
    spark : :obj:`SparkSession <pyspark.sql.SparkSession>`
    dataset : DataFrame
    columnNames : list
    """
    if "chainId" in dataset.columns:
        dataset.createOrReplaceTempView("table")
        sql = "SELECT CONCAT(structureId,'.',chainId) as structureChainId," + \
              "structureId,chainId,%s" % ','.join(columnNames) + \
              " from table"
        dataset = spark.sql(sql)
    return dataset
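
A hedged usage sketch for `_concat_ids` (the toy PDB-style rows and the `score` column are invented for illustration):

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("concat-ids-demo").getOrCreate()
rows = [("1ABC", "A", 0.9), ("1ABC", "B", 0.8)]
ds = spark.createDataFrame(rows, ["structureId", "chainId", "score"])

keyed = _concat_ids(spark, ds, ["score"])
keyed.show()  # the structureChainId column holds values like "1ABC.A"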
Example 10: init_spark_session
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def init_spark_session(app_name):
    """Initializes a Spark session with the given application name.

    Args:
        app_name (str): Name of the Spark application. This will also appear in the Spark UI.
    """
    global session, context, sql_context
    try:
        session = SparkSession \
            .builder \
            .appName(app_name) \
            .config("spark.hadoop.dfs.client.use.datanode.hostname", "true") \
            .config("spark.hadoop.dfs.datanode.use.datanode.hostname", "true") \
            .config("spark.driver.maxResultSize", "4g") \
            .getOrCreate()
        context = session.sparkContext
        context.setLogLevel("ERROR")
        sql_context = SQLContext(context)
    except Py4JJavaError as err:
        raise SparkSessionNotInitializedException(app_name, err.java_exception)
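
A hedged usage sketch, assuming the function above lives in a module that exposes the globals it sets (the job name is a placeholder):

# Hypothetical call site; `session` and `context` are the module-level globals set above.
init_spark_session("listen-count-job")     # placeholder application name

df = session.createDataFrame([(1,), (2,)], ["x"])
print(df.count())                          # the global session is now usable
context.setLogLevel("WARN")                # as is the global SparkContext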
Example 11: register_udfs
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def register_udfs(self, sess, sc):
    """Register UDFs to be used in SQL queries.

    :type sess: `pyspark.sql.SparkSession`
    :param sess: Session used in Spark for SQL queries.
    :type sc: `pyspark.SparkContext`
    :param sc: Spark context to run Spark jobs.
    """
    sess.udf.register("SQUARED", self.squared, returnType=(
        stypes.ArrayType(stypes.StructType(
            fields=[stypes.StructField('sku0', stypes.StringType()),
                    stypes.StructField('norm', stypes.FloatType())]))))
    sess.udf.register('INTERSECTIONS', self.process_intersections,
        returnType=stypes.ArrayType(stypes.StructType(fields=[
            stypes.StructField('sku0', stypes.StringType()),
            stypes.StructField('sku1', stypes.StringType()),
            stypes.StructField('cor', stypes.FloatType())])))
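
Once registered, the UDFs can be called by name from SQL. A hedged sketch of the query shapes (the `skus` and `baskets` views and their columns are assumptions about the surrounding recommender code, not taken from it):

# Hypothetical queries; table and column names are illustrative only.
norms = sess.sql("SELECT SQUARED(sku_scores) AS sku_norms FROM skus")
pairs = sess.sql("SELECT INTERSECTIONS(basket_a, basket_b) AS cors FROM baskets")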
Example 12: save_overwrite_unmanaged_table
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def save_overwrite_unmanaged_table(spark: SparkSession, dataframe: DataFrame, table_name: str, path: str):
    """When trying to read and overwrite the same table, you get this error:
    'Cannot overwrite table dw.dim_parking_bay that is also being read from;'
    This utility function works around it by saving to a temporary table prior to overwriting.
    """
    temp_table_name = table_name + "___temp"
    spark.sql("DROP TABLE IF EXISTS " + temp_table_name).collect()
    # Save to the temp table
    dataframe.write.saveAsTable(temp_table_name)
    # Read the temp table back and overwrite the original table
    spark.read.table(temp_table_name)\
        .write.mode("overwrite")\
        .option("path", path)\
        .saveAsTable(table_name)
    # Drop the temp table
    spark.sql("DROP TABLE " + temp_table_name).collect()
Example 13: process_data
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def process_data(
    database_name: str, table_name: str, td_spark: Optional[TDSparkContext] = None
) -> None:
    """
    Load a Treasure Data table and upload it back to Treasure Data after PySpark processing.

    :param database_name: Target database name on Treasure Data
    :param table_name: Target table name on Treasure Data
    :param td_spark: [Optional] TDSparkContext
    """
    if td_spark is None:
        td_spark = _prepare_td_spark()

    # Read sample_datasets from the TD table
    access_df = td_spark.table("sample_datasets.www_access").df()

    # Process with PySpark
    processed_df = access_df.filter("method = 'GET'").withColumn(
        "time_str", func.from_unixtime("time")
    )

    # Upload the processed Spark DataFrame to TD
    td_spark.create_database_if_not_exists(database_name)
    td_spark.create_or_replace(processed_df, f"{database_name}.{table_name}")
Example 14: upload_dataframe
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def upload_dataframe(
    database_name: str, table_name: str, td_spark: Optional[TDSparkContext] = None
) -> None:
    """
    Create a pandas DataFrame and upload it to Treasure Data.

    :param database_name: Target database name on Treasure Data
    :param table_name: Target table name on Treasure Data
    :param td_spark: [Optional] TDSparkContext
    """
    import numpy as np
    import pandas as pd

    if td_spark is None:
        td_spark = _prepare_td_spark()
    spark = td_spark.spark

    df = pd.DataFrame({"c": np.random.binomial(10, 0.5, 10)})
    sdf = spark.createDataFrame(df)
    td_spark.create_database_if_not_exists(database_name)
    td_spark.create_or_replace(sdf, f"{database_name}.{table_name}")
Example 15: _prepare_td_spark
# Required import: from pyspark import sql [as alias]
# Or: from pyspark.sql import SparkSession [as alias]
def _prepare_td_spark() -> TDSparkContext:
    """
    Create a SparkSession in local mode with td-spark-specific configurations.

    :return: TDSparkContext
    """
    apikey = os.environ["TD_API_KEY"]
    endpoint = os.environ["TD_API_SERVER"]
    site = "us"
    if ".co.jp" in endpoint:
        site = "jp"
    elif "eu01" in endpoint:
        site = "eu01"
    builder = SparkSession.builder.appName("spark_als")
    td = (
        TDSparkContextBuilder(builder)
        .apikey(apikey)
        .site(site)
        .jars(TDSparkContextBuilder.default_jar_path())
        .build()
    )
    return td
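
Putting Examples 13–15 together, a hedged end-to-end sketch (the environment values and database/table names are placeholders):

import os

os.environ.setdefault("TD_API_KEY", "YOUR_TD_API_KEY")                  # placeholder
os.environ.setdefault("TD_API_SERVER", "https://api.treasuredata.com")  # placeholder

td = _prepare_td_spark()
process_data("my_database", "filtered_www_access", td_spark=td)
upload_dataframe("my_database", "random_binomials", td_spark=td)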