This article collects typical usage examples of the Python class pyspark.ml.Model. If you are wondering what exactly pyspark.ml.Model does and how it is used, the curated code examples below should help. You can also explore the enclosing module, pyspark.ml, for related usage.
Eight code examples of pyspark.ml.Model are shown below, ordered by popularity by default.
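Before the examples, here is a minimal, self-contained sketch of where a pyspark.ml.Model instance typically comes from: calling fit() on an Estimator returns a Model. The tiny DataFrame and column names below are illustrative assumptions, not taken from the examples.
from pyspark.sql import SparkSession
from pyspark.ml import Model
from pyspark.ml.feature import StandardScaler
from pyspark.ml.linalg import Vectors

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([
    (Vectors.dense([1.0, 2.0]),),
    (Vectors.dense([3.0, 4.0]),)], ["features"])
scaler = StandardScaler(inputCol="features", outputCol="scaled")
scaler_model = scaler.fit(df)           # fit() on an Estimator returns a Model
assert isinstance(scaler_model, Model)  # StandardScalerModel is a pyspark.ml.Model
scaler_model.transform(df).show()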
Example 1: test_java_params
# Required import: from pyspark import ml [as alias]
# Or: from pyspark.ml import Model [as alias]
def test_java_params(self):
    import pyspark.ml.feature
    import pyspark.ml.classification
    import pyspark.ml.clustering
    import pyspark.ml.evaluation
    import pyspark.ml.pipeline
    import pyspark.ml.recommendation
    import pyspark.ml.regression

    modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
               pyspark.ml.evaluation, pyspark.ml.pipeline, pyspark.ml.recommendation,
               pyspark.ml.regression]
    for module in modules:
        for name, cls in inspect.getmembers(module, inspect.isclass):
            if not name.endswith('Model') and not name.endswith('Params') \
                    and issubclass(cls, JavaParams) and not inspect.isabstract(cls):
                # NOTE: disable check_params_exist until there is parity with Scala API
                ParamTests.check_params(self, cls(), check_params_exist=False)
    # Additional classes that need explicit construction
    from pyspark.ml.feature import CountVectorizerModel, StringIndexerModel
    ParamTests.check_params(self, CountVectorizerModel.from_vocabulary(['a'], 'input'),
                            check_params_exist=False)
    ParamTests.check_params(self, StringIndexerModel.from_labels(['a', 'b'], 'input'),
                            check_params_exist=False)
Example 2: _make_java_param_pair
# Required import: from pyspark import ml [as alias]
# Or: from pyspark.ml import Model [as alias]
def _make_java_param_pair(self, param, value):
    """
    Makes a Java param pair.
    """
    sc = SparkContext._active_spark_context
    param = self._resolveParam(param)
    _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
                                         self.uid)
    java_param = _java_obj.getParam(param.name)
    if isinstance(value, JavaParams):
        # used in the case of an estimator having another estimator as a parameter
        # the reason why this is not in _py2java in common.py is that importing
        # Estimator and Model in common.py results in a circular import with inherit_doc
        java_value = value._to_java()
    else:
        java_value = _py2java(sc, value)
    return java_param.w(java_value)
Example 3: test_idf
# Required import: from pyspark import ml [as alias]
# Or: from pyspark.ml import Model [as alias]
def test_idf(self):
    dataset = self.spark.createDataFrame([
        (DenseVector([1.0, 2.0]),),
        (DenseVector([0.0, 1.0]),),
        (DenseVector([3.0, 0.2]),)], ["tf"])
    idf0 = IDF(inputCol="tf")
    self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
    idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
    self.assertEqual(idf0m.uid, idf0.uid,
                     "Model should inherit the UID from its parent estimator.")
    output = idf0m.transform(dataset)
    self.assertIsNotNone(output.head().idf)
    # Test that parameters transferred to Python Model
    ParamTests.check_params(self, idf0m)
Example 4: coefficients
# Required import: from pyspark import ml [as alias]
# Or: from pyspark.ml import Model [as alias]
@property
def coefficients(self):
    """
    Model coefficients of Linear SVM Classifier.
    """
    return self._call_java("coefficients")
Example 5: intercept
# Required import: from pyspark import ml [as alias]
# Or: from pyspark.ml import Model [as alias]
@property
def intercept(self):
    """
    Model intercept of Linear SVM Classifier.
    """
    return self._call_java("intercept")
Example 6: coefficientMatrix
# Required import: from pyspark import ml [as alias]
# Or: from pyspark.ml import Model [as alias]
@property
def coefficientMatrix(self):
    """
    Model coefficients.
    """
    return self._call_java("coefficientMatrix")
Example 7: interceptVector
# Required import: from pyspark import ml [as alias]
# Or: from pyspark.ml import Model [as alias]
@property
def interceptVector(self):
    """
    Model intercept.
    """
    return self._call_java("interceptVector")
Example 8: _validate_model
# Required import: from pyspark import ml [as alias]
# Or: from pyspark.ml import Model [as alias]
def _validate_model(spark_model):
    from pyspark.ml.util import MLReadable, MLWritable
    from pyspark.ml import Model as PySparkModel
    if not isinstance(spark_model, PySparkModel) \
            or not isinstance(spark_model, MLReadable) \
            or not isinstance(spark_model, MLWritable):
        raise MlflowException(
            "Cannot serialize this model. MLflow can only save descendants of pyspark.ml.Model"
            " that implement MLWritable and MLReadable.",
            INVALID_PARAMETER_VALUE)