This article collects typical usage examples of the Python method pyspark.sql.types.StructType.add. If you have been wondering what StructType.add does, how to call it, or what real uses of it look like, the hand-picked code examples below should help. You can also read more about its containing class, pyspark.sql.types.StructType.
Five code examples of StructType.add are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
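Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of the two ways StructType.add can be called: with a ready-made StructField, or with a field name plus a data type and a nullable flag. The column names are illustrative only.

from pyspark.sql.types import StructType, StructField, IntegerType, StringType

schema = StructType()

# Form 1: add a ready-made StructField.
schema.add(StructField("user_id", IntegerType()))

# Form 2: pass the field name, data type and nullable flag directly.
# add() returns the StructType itself, so calls can be chained.
schema.add("item_id", IntegerType(), True).add("category", StringType(), True)

print(schema.fieldNames())  # ['user_id', 'item_id', 'category']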
Example 1: _get_schema
# Required module: from pyspark.sql.types import StructType [as alias]
# Or alternatively: from pyspark.sql.types.StructType import add [as alias]
import warnings

from pyspark.sql.types import StructType, StructField, IntegerType, FloatType, LongType

# WARNING_MOVIE_LENS_HEADER and WARNING_HAVE_SCHEMA_AND_HEADER are warning-message
# string constants defined elsewhere in the source module.


def _get_schema(header, schema):
    if schema is None or len(schema) == 0:
        # Use header to generate schema
        if header is None or len(header) == 0:
            return None
        elif len(header) > 4:
            warnings.warn(WARNING_MOVIE_LENS_HEADER)
            header = header[:4]
        schema = StructType()
        try:
            schema.add(StructField(header[0], IntegerType())).add(
                StructField(header[1], IntegerType())
            ).add(StructField(header[2], FloatType())).add(
                StructField(header[3], LongType())
            )
        except IndexError:
            pass
    else:
        if header is not None:
            warnings.warn(WARNING_HAVE_SCHEMA_AND_HEADER)
        if len(schema) > 4:
            warnings.warn(WARNING_MOVIE_LENS_HEADER)
            schema = schema[:4]
    return schema
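A hedged usage sketch for the helper above: the MovieLens-style header below is illustrative only, and the warning constants and pyspark imports are assumed to come from the module in which _get_schema is defined.

ratings_header = ["UserId", "MovieId", "Rating", "Timestamp"]  # hypothetical column names
schema = _get_schema(ratings_header, None)
# schema now holds two IntegerType fields, a FloatType field and a LongType field.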
Example 2: get_spark_schema
# Required module: from pyspark.sql.types import StructType [as alias]
# Or alternatively: from pyspark.sql.types.StructType import add [as alias]
from pyspark.sql.types import StructType, StructField, IntegerType, StringType

# DEFAULT_HEADER is a list of 40 column names (the label plus 13 integer and
# 26 categorical features) defined elsewhere in the source module.


def get_spark_schema(header=DEFAULT_HEADER):
    ## create schema
    schema = StructType()
    ## do label + ints
    n_ints = 14
    for i in range(n_ints):
        schema.add(StructField(header[i], IntegerType()))
    ## do categoricals
    for i in range(26):
        schema.add(StructField(header[i + n_ints], StringType()))
    return schema
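A sketch of how a schema like this is typically consumed; the header composition, the SparkSession named spark, and the file path are assumptions rather than part of the original snippet.

# Hypothetical Criteo-style header: a label, 13 integer features, 26 categorical features.
header = (["label"]
          + ["int{0:02d}".format(i) for i in range(13)]
          + ["cat{0:02d}".format(i) for i in range(26)])
schema = get_spark_schema(header)
df = spark.read.csv("dac_sample.txt", schema=schema, sep="\t")  # placeholder path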
Example 3: __init__
# Required module: from pyspark.sql.types import StructType [as alias]
# Or alternatively: from pyspark.sql.types.StructType import add [as alias]
from pyspark.sql import SQLContext
from pyspark.sql.types import StructType, StructField, DoubleType

# Constructor of pyspark.mllib.evaluation.BinaryClassificationMetrics;
# scoreAndLabels is an RDD of (score, label) or (score, label, weight) tuples.
def __init__(self, scoreAndLabels):
    sc = scoreAndLabels.ctx
    sql_ctx = SQLContext.getOrCreate(sc)
    numCol = len(scoreAndLabels.first())
    schema = StructType([
        StructField("score", DoubleType(), nullable=False),
        StructField("label", DoubleType(), nullable=False)])
    if numCol == 3:
        schema.add("weight", DoubleType(), False)
    df = sql_ctx.createDataFrame(scoreAndLabels, schema=schema)
    java_class = sc._jvm.org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
    java_model = java_class(df._jdf)
    super(BinaryClassificationMetrics, self).__init__(java_model)
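For context, a small sketch of how this constructor is reached from user code; the scores and labels are made-up values, and sc is assumed to be an active SparkContext.

from pyspark.mllib.evaluation import BinaryClassificationMetrics

# (score, label) pairs; a third weight element per tuple would trigger the
# schema.add("weight", ...) branch above.
score_and_labels = sc.parallelize([(0.1, 0.0), (0.4, 0.0), (0.8, 1.0), (0.9, 1.0)])
metrics = BinaryClassificationMetrics(score_and_labels)
print(metrics.areaUnderROC)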
Example 4: __init__
# Required module: from pyspark.sql.types import StructType [as alias]
# Or alternatively: from pyspark.sql.types.StructType import add [as alias]
from pyspark.sql import SQLContext
from pyspark.sql.types import StructType, StructField, DoubleType

# Constructor of pyspark.mllib.evaluation.MulticlassMetrics; the input RDD holds
# (prediction, label) or (prediction, label, weight) tuples.
def __init__(self, predAndLabelsWithOptWeight):
    sc = predAndLabelsWithOptWeight.ctx
    sql_ctx = SQLContext.getOrCreate(sc)
    numCol = len(predAndLabelsWithOptWeight.first())
    schema = StructType([
        StructField("prediction", DoubleType(), nullable=False),
        StructField("label", DoubleType(), nullable=False)])
    if numCol == 3:
        schema.add("weight", DoubleType(), False)
    df = sql_ctx.createDataFrame(predAndLabelsWithOptWeight, schema)
    java_class = sc._jvm.org.apache.spark.mllib.evaluation.MulticlassMetrics
    java_model = java_class(df._jdf)
    super(MulticlassMetrics, self).__init__(java_model)
Example 5: _create_from_pandas_with_arrow
# Required module: from pyspark.sql.types import StructType [as alias]
# Or alternatively: from pyspark.sql.types.StructType import add [as alias]
# Method of pyspark.sql.SparkSession (Spark 2.x); names such as StructType,
# DataType, DataFrame, and xrange are imported or defined at the top of
# pyspark/sql/session.py.
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
    to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
    data types will be used to coerce the data in Pandas to Arrow conversion.
    """
    from distutils.version import LooseVersion
    from pyspark.serializers import ArrowStreamPandasSerializer
    from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
    from pyspark.sql.utils import require_minimum_pandas_version, \
        require_minimum_pyarrow_version

    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    import pyarrow as pa

    # Create the Spark schema from list of names passed in with Arrow types
    if isinstance(schema, (list, tuple)):
        if LooseVersion(pa.__version__) < LooseVersion("0.12.0"):
            temp_batch = pa.RecordBatch.from_pandas(pdf[0:100], preserve_index=False)
            arrow_schema = temp_batch.schema
        else:
            arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
        struct = StructType()
        for name, field in zip(schema, arrow_schema):
            struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
        schema = struct

    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [to_arrow_type(TimestampType())
                       if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                       for t in pdf.dtypes]

    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))

    # Create list of Arrow (columns, type) for serializer dump_stream
    arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
                  for pdf_slice in pdf_slices]

    jsqlContext = self._wrapped._jsqlContext

    safecheck = self._wrapped._conf.arrowSafeTypeConversion()
    col_by_name = True  # col by name only applies to StructType columns, can't happen here
    ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)

    def reader_func(temp_filename):
        return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)

    def create_RDD_server():
        return self._jvm.ArrowRDDServer(jsqlContext)

    # Create Spark DataFrame from Arrow stream file, using one batch per partition
    jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
    jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df
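The method above is the internal path taken when Arrow is enabled and the schema argument is a plain list of column names; below is a rough user-facing sketch. The data is made up, a SparkSession named spark is assumed, and the Arrow configuration key differs between Spark versions (spark.sql.execution.arrow.enabled in 2.x, spark.sql.execution.arrow.pyspark.enabled in 3.x).

import pandas as pd

pdf = pd.DataFrame({"id": [1, 2, 3], "score": [0.5, 0.7, 0.9]})

spark.conf.set("spark.sql.execution.arrow.enabled", "true")  # 2.x key; see note above

# With a list of names as the schema, the code above builds a StructType by calling
# struct.add(name, from_arrow_type(field.type), nullable=field.nullable) per column.
df = spark.createDataFrame(pdf, schema=["id", "score"])
df.printSchema()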