This article collects typical usage examples of the pyspark.RDD attribute in Python. If you are wondering what exactly pyspark.RDD does and how it is used in practice, the curated examples below may help. You can also explore further usage examples of the containing module, pyspark.
The following shows 15 code examples of the pyspark.RDD attribute, sorted by popularity by default.
Example 1: clean_claims
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def clean_claims(claims: RDD, b_item_map: Broadcast):
    def clean(claim):
        item_map = b_item_map.value
        if claim.datatype == 'wikibase-item':
            # Rewrite the object id through the broadcast item map; drop unknown items.
            if claim.object in item_map:
                claim = claim._replace(object=item_map[claim.object])
                return claim
            else:
                return None
        elif claim.datatype == 'quantity':
            # The unit is a full entity URI; keep only the trailing item id.
            unit = claim.object.unit
            unit = unit.split('/')[-1]
            if unit in item_map:
                claim = claim._replace(object=item_map[unit])
                return claim
            else:
                return None
        return claim

    dt_filter = {'wikibase-item', 'string', 'monolingualtext', 'quantity', 'time'}
    return claims.filter(lambda c: c.datatype in dt_filter).map(clean).filter(lambda c: c is not None)
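A minimal usage sketch, assuming a local SparkContext, that clean_claims is defined as above, and a hypothetical Claim namedtuple with datatype and object fields (the real records would come from whatever claim-parsing pipeline feeds this function):

from collections import namedtuple
from pyspark import SparkContext

# Hypothetical record shape for illustration only.
Claim = namedtuple('Claim', ['subject', 'property', 'datatype', 'object'])

sc = SparkContext('local[2]', 'clean-claims-demo')
b_item_map = sc.broadcast({'Q42': 0, 'Q5': 1})  # hypothetical item-id lookup

claims = sc.parallelize([
    Claim('Q42', 'P31', 'wikibase-item', 'Q5'),                 # kept, object remapped to 1
    Claim('Q42', 'P31', 'wikibase-item', 'Q999'),                # dropped, object not in the map
    Claim('Q42', 'P1476', 'monolingualtext', 'Douglas Adams'),   # kept unchanged
])

print(clean_claims(claims, b_item_map).collect())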
Example 2: partition_per_row
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def partition_per_row(rdd: RDD) -> RDD:
    """Place each row in an RDD into a separate partition.

    Only useful if that row represents something large to be computed over,
    perhaps an external resource such as a multi-GB training dataset. The spark
    part of the dataset is expected to be tiny and easily fit in a single
    partition.
    """
    num_rows = rdd.count()
    # Help out mypy. Also don't use `identity`, as it somehow fails serialization
    partition_fn = cast(Callable[[int], int], lambda x: x)
    return (
        # Bring everything together and assign each row a partition id
        rdd.repartition(1)
        .mapPartitions(lambda rows: enumerate(rows))
        # Partition by the new partition_id
        .partitionBy(num_rows, partition_fn)
        # Drop the partition id, giving back the original shape
        .map(lambda pair: pair[1])
    )
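A quick sketch of what this buys you, assuming a local SparkContext: each element ends up alone in its own partition, so a later mapPartitions can treat every row as an independent heavyweight task.

from pyspark import SparkContext

sc = SparkContext('local[4]', 'partition-per-row-demo')
paths = sc.parallelize(['a.bin', 'b.bin', 'c.bin'])   # e.g. pointers to large external resources

spread = partition_per_row(paths)
print(spread.getNumPartitions())   # 3, one partition per row
print(spread.glom().collect())     # three singleton lists (row order may vary)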
Example 3: apply
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def apply(self, data_points: RDD, fault_tolerant: bool = False) -> np.ndarray:
    """Label PySpark RDD of data points with LFs.

    Parameters
    ----------
    data_points
        PySpark RDD containing data points to be labeled by LFs
    fault_tolerant
        Output ``-1`` if LF execution fails?

    Returns
    -------
    np.ndarray
        Matrix of labels emitted by LFs
    """
    f_caller = _FunctionCaller(fault_tolerant)

    def map_fn(args: Tuple[DataPoint, int]) -> RowData:
        return apply_lfs_to_data_point(*args, lfs=self._lfs, f_caller=f_caller)

    labels = data_points.zipWithIndex().map(map_fn).collect()
    return self._numpy_from_row_data(labels)
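This apply appears to come from Snorkel's Spark-based labeling-function applier. A minimal sketch under that assumption (module paths as in Snorkel 0.9; the SimpleNamespace data points are just a stand-in for real records with a text attribute):

from types import SimpleNamespace

from pyspark import SparkContext
from snorkel.labeling import labeling_function
from snorkel.labeling.apply.spark import SparkLFApplier

@labeling_function()
def lf_contains_spam(x):
    # Return a label int, or -1 to abstain.
    return 1 if 'spam' in x.text else -1

sc = SparkContext('local[2]', 'lf-apply-demo')
rdd = sc.parallelize([SimpleNamespace(text='buy spam now'), SimpleNamespace(text='hello')])

applier = SparkLFApplier([lf_contains_spam])
L = applier.apply(rdd)   # shape (n_points, n_lfs); -1 marks abstentions
print(L)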
Example 4: _py2java
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def _py2java(sc, obj):
    """ Convert Python object into Java """
    if isinstance(obj, RDD):
        obj = _to_java_object_rdd(obj)
    elif isinstance(obj, DataFrame):
        obj = obj._jdf
    elif isinstance(obj, SparkContext):
        obj = obj._jsc
    elif isinstance(obj, list):
        obj = [_py2java(sc, x) for x in obj]
    elif isinstance(obj, JavaObject):
        pass
    elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
        pass
    else:
        # Fall back to pickling the object and deserializing it on the JVM side.
        data = bytearray(PickleSerializer().dumps(obj))
        obj = sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(data)
    return obj
Example 5: train
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def train(cls, data, lambda_=1.0):
    """
    Train a Naive Bayes model given an RDD of (label, features) vectors.

    This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}), which
    can handle all kinds of discrete data. For example, by
    converting documents into TF-IDF vectors, it can be used for
    document classification. By making every vector a 0-1 vector,
    it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).
    The input feature values must be nonnegative.

    :param data:
      RDD of LabeledPoint.
    :param lambda_:
      The smoothing parameter.
      (default: 1.0)
    """
    first = data.first()
    if not isinstance(first, LabeledPoint):
        raise ValueError("`data` should be an RDD of LabeledPoint")
    labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_)
    return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
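A minimal sketch of calling the public wrapper, NaiveBayes.train from pyspark.mllib.classification, assuming a local SparkContext:

from pyspark import SparkContext
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint

sc = SparkContext('local[2]', 'nb-demo')
data = sc.parallelize([
    LabeledPoint(0.0, Vectors.dense([1.0, 0.0])),
    LabeledPoint(1.0, Vectors.dense([0.0, 1.0])),
])

model = NaiveBayes.train(data, lambda_=1.0)
print(model.predict(Vectors.dense([0.0, 1.0])))   # expected label: 1.0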
Example 6: predict
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def predict(self, x):
    """
    Predict the label of one or more examples.

    .. note:: In Python, predict cannot currently be used within an RDD
        transformation or action.
        Call predict directly on the RDD instead.

    :param x:
      Data point (feature vector), or an RDD of data points (feature
      vectors).
    """
    if isinstance(x, RDD):
        return self.call("predict", x.map(_convert_to_vector))
    else:
        return self.call("predict", _convert_to_vector(x))
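This predict lives on the JVM-backed tree models in pyspark.mllib. A short usage sketch with a decision tree, keeping in mind the docstring's caveat that predict must be called on the RDD itself rather than inside another transformation:

from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree

sc = SparkContext('local[2]', 'tree-predict-demo')
data = sc.parallelize([
    LabeledPoint(0.0, [0.0]),
    LabeledPoint(1.0, [1.0]),
])

model = DecisionTree.trainClassifier(data, numClasses=2, categoricalFeaturesInfo={})

print(model.predict([1.0]))                                       # single feature vector
print(model.predict(data.map(lambda lp: lp.features)).collect())  # whole RDD at once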
Example 7: predict
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def predict(self, x):
    """
    Predict labels for provided features, using a piecewise linear function.

    1) If x exactly matches a boundary then the associated prediction
       is returned. In case there are multiple predictions with the
       same boundary then one of them is returned. Which one is
       undefined (same as java.util.Arrays.binarySearch).
    2) If x is lower or higher than all boundaries then the first or
       last prediction is returned respectively. In case there are
       multiple predictions with the same boundary then the lowest
       or highest is returned respectively.
    3) If x falls between two values in the boundary array then the
       prediction is treated as a piecewise linear function and the
       interpolated value is returned. In case there are multiple
       values with the same boundary then the same rules as in 2)
       are used.

    :param x:
      Feature or RDD of features to be labeled.
    """
    if isinstance(x, RDD):
        return x.map(lambda v: self.predict(v))
    return np.interp(x, self.boundaries, self.predictions)
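A short usage sketch for the isotonic-regression model that owns this predict, assuming pyspark.mllib.regression.IsotonicRegression and training data given as (label, feature, weight) tuples:

from pyspark import SparkContext
from pyspark.mllib.regression import IsotonicRegression

sc = SparkContext('local[2]', 'isotonic-demo')
data = sc.parallelize([(1.0, 1.0, 1.0), (2.0, 2.0, 1.0), (4.0, 3.0, 1.0)])

model = IsotonicRegression.train(data)   # fits a monotone piecewise-linear function
print(model.predict(2.5))                # interpolated between the 2.0 and 3.0 boundaries
print(model.predict(sc.parallelize([0.5, 2.5, 9.0])).collect())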
Example 8: _py2java
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def _py2java(sc, obj):
    """ Convert Python object into Java """
    if isinstance(obj, RDD):
        obj = _to_java_object_rdd(obj)
    elif isinstance(obj, DataFrame):
        obj = obj._jdf
    elif isinstance(obj, SparkContext):
        obj = obj._jsc
    elif isinstance(obj, list):
        obj = [_py2java(sc, x) for x in obj]
    elif isinstance(obj, JavaObject):
        pass
    elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
        pass
    else:
        # Same fallback as Example 4, but routed through the MLlib SerDe.
        data = bytearray(PickleSerializer().dumps(obj))
        obj = sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(data)
    return obj
Example 9: rows
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def rows(self):
    """
    Rows of the IndexedRowMatrix stored as an RDD of IndexedRows.

    >>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]),
    ...                                        IndexedRow(1, [4, 5, 6])]))
    >>> rows = mat.rows
    >>> rows.first()
    IndexedRow(0, [1.0,2.0,3.0])
    """
    # We use DataFrames for serialization of IndexedRows from
    # Java, so we first convert the RDD of rows to a DataFrame
    # on the Scala/Java side. Then we map each Row in the
    # DataFrame back to an IndexedRow on this side.
    rows_df = callMLlibFunc("getIndexedRows", self._java_matrix_wrapper._java_model)
    rows = rows_df.rdd.map(lambda row: IndexedRow(row[0], row[1]))
    return rows
Example 10: entries
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def entries(self):
    """
    Entries of the CoordinateMatrix stored as an RDD of MatrixEntries.

    >>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2),
    ...                                        MatrixEntry(6, 4, 2.1)]))
    >>> entries = mat.entries
    >>> entries.first()
    MatrixEntry(0, 0, 1.2)
    """
    # We use DataFrames for serialization of MatrixEntry entries
    # from Java, so we first convert the RDD of entries to a
    # DataFrame on the Scala/Java side. Then we map each Row in
    # the DataFrame back to a MatrixEntry on this side.
    entries_df = callMLlibFunc("getMatrixEntries", self._java_matrix_wrapper._java_model)
    entries = entries_df.rdd.map(lambda row: MatrixEntry(row[0], row[1], row[2]))
    return entries
Example 11: blocks
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def blocks(self):
    """
    The RDD of sub-matrix blocks
    ((blockRowIndex, blockColIndex), sub-matrix) that form this
    distributed matrix.

    >>> mat = BlockMatrix(
    ...     sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
    ...                     ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
    >>> blocks = mat.blocks
    >>> blocks.first()
    ((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))
    """
    # We use DataFrames for serialization of sub-matrix blocks
    # from Java, so we first convert the RDD of blocks to a
    # DataFrame on the Scala/Java side. Then we map each Row in
    # the DataFrame back to a sub-matrix block on this side.
    blocks_df = callMLlibFunc("getMatrixBlocks", self._java_matrix_wrapper._java_model)
    blocks = blocks_df.rdd.map(lambda row: ((row[0][0], row[0][1]), row[1]))
    return blocks
Example 12: pprint
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def pprint(self, num=10):
    """
    Print the first num elements of each RDD generated in this DStream.

    @param num: the number of elements to print from the beginning of each RDD.
    """
    def takeAndPrint(time, rdd):
        taken = rdd.take(num + 1)
        print("-------------------------------------------")
        print("Time: %s" % time)
        print("-------------------------------------------")
        for record in taken[:num]:
            print(record)
        if len(taken) > num:
            print("...")
        print("")

    self.foreachRDD(takeAndPrint)
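A minimal streaming sketch, assuming a local two-core master; queueStream feeds one RDD per batch so pprint has something to print:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext('local[2]', 'pprint-demo')
ssc = StreamingContext(sc, batchDuration=1)

# One RDD per 1-second batch.
batches = [sc.parallelize(range(i, i + 3)) for i in (0, 3, 6)]
lines = ssc.queueStream(batches)
lines.pprint(num=2)   # prints at most 2 records per batch, plus "..." if more exist

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop(stopSparkContext=True, stopGraceFully=False)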
Example 13: transformWith
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def transformWith(self, func, other, keepSerializer=False):
    """
    Return a new DStream in which each RDD is generated by applying a function
    on each RDD of this DStream and 'other' DStream.

    `func` can have two arguments of (`rdd_a`, `rdd_b`) or have three
    arguments of (`time`, `rdd_a`, `rdd_b`)
    """
    if func.__code__.co_argcount == 2:
        oldfunc = func
        func = lambda t, a, b: oldfunc(a, b)
    assert func.__code__.co_argcount == 3, "func should take two or three arguments"
    jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer, other._jrdd_deserializer)
    dstream = self._sc._jvm.PythonTransformed2DStream(self._jdstream.dstream(),
                                                      other._jdstream.dstream(), jfunc)
    jrdd_serializer = self._jrdd_deserializer if keepSerializer else self._sc.serializer
    return DStream(dstream.asJavaDStream(), self._ssc, jrdd_serializer)
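A short sketch that pairs up two DStreams batch by batch, reusing the sc/ssc setup from the pprint sketch above:

# Assumes `sc` and `ssc` are created as in the pprint sketch above.
evens = ssc.queueStream([sc.parallelize([(k, k * 2) for k in range(3)])])
names = ssc.queueStream([sc.parallelize([(k, 'row-%d' % k) for k in range(3)])])

# Two-argument form: func receives the RDD from each stream for the same batch.
joined = evens.transformWith(lambda a, b: a.join(b), names)
joined.pprint()   # e.g. (0, (0, 'row-0')), (1, (2, 'row-1')), ...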
Example 14: window
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def window(self, windowDuration, slideDuration=None):
    """
    Return a new DStream in which each RDD contains all the elements seen in a
    sliding window of time over this DStream.

    @param windowDuration: width of the window; must be a multiple of this DStream's
                           batching interval
    @param slideDuration: sliding interval of the window (i.e., the interval after which
                          the new DStream will generate RDDs); must be a multiple of this
                          DStream's batching interval
    """
    self._validate_window_param(windowDuration, slideDuration)
    d = self._ssc._jduration(windowDuration)
    if slideDuration is None:
        return DStream(self._jdstream.window(d), self._ssc, self._jrdd_deserializer)
    s = self._ssc._jduration(slideDuration)
    return DStream(self._jdstream.window(d, s), self._ssc, self._jrdd_deserializer)
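A sketch of windowing over the queueStream from the pprint example: with a 1-second batch interval, a 3-second window sliding every second keeps the last three batches together.

# Assumes `lines` is the 1-second-batch DStream from the pprint sketch above.
windowed = lines.window(windowDuration=3, slideDuration=1)
windowed.count().pprint()   # count of all elements currently inside the 3s window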
Example 15: countByValueAndWindow
# Required module: import pyspark [as alias]
# Or: from pyspark import RDD [as alias]
def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None):
    """
    Return a new DStream in which each RDD contains the count of distinct elements in
    RDDs in a sliding window over this DStream.

    @param windowDuration: width of the window; must be a multiple of this DStream's
                           batching interval
    @param slideDuration: sliding interval of the window (i.e., the interval after which
                          the new DStream will generate RDDs); must be a multiple of this
                          DStream's batching interval
    @param numPartitions: number of partitions of each RDD in the new DStream.
    """
    keyed = self.map(lambda x: (x, 1))
    counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub,
                                         windowDuration, slideDuration, numPartitions)
    return counted.filter(lambda kv: kv[1] > 0)
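Because the implementation uses an inverse reduce function (operator.sub) to subtract batches that fall out of the window, Spark Streaming requires checkpointing for this operation. A minimal sketch, again reusing the ssc/lines setup from the pprint example; the checkpoint path is just an example location:

# Assumes `ssc` and `lines` are set up as in the pprint sketch above.
ssc.checkpoint('/tmp/streaming-checkpoint')   # required by the inverse-reduce window path

counts = lines.countByValueAndWindow(windowDuration=3, slideDuration=1)
counts.pprint()   # (value, count) pairs for values seen inside the current 3s window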