

Python ml.Model Code Examples

This article collects typical usage examples of pyspark.ml.Model in Python. If you are wondering how pyspark.ml.Model is used in practice, what calling it looks like, or where to find real code that relies on it, the curated examples below may help. You can also explore further usage examples from its parent module, pyspark.ml.


Eight code examples of ml.Model are shown below, ordered by popularity by default.
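
Before the examples, a minimal sketch (not drawn from any of them) of where a pyspark.ml.Model instance comes from may help: an Estimator's fit() returns a Model, and the Model's transform() appends prediction columns. The session, toy DataFrame, and column names below are illustrative assumptions only.

from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.classification import LogisticRegression

spark = SparkSession.builder.getOrCreate()
train = spark.createDataFrame(
    [(Vectors.dense([0.0, 1.0]), 0.0),
     (Vectors.dense([1.0, 0.0]), 1.0)],
    ["features", "label"])

lr = LogisticRegression(maxIter=5)     # Estimator
model = lr.fit(train)                  # LogisticRegressionModel, a pyspark.ml.Model
model.transform(train).select("features", "prediction").show()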

Example 1: test_java_params

# Required import: from pyspark import ml [as an alias]
# Or: from pyspark.ml import Model [as an alias]
def test_java_params(self):
        import pyspark.ml.feature
        import pyspark.ml.classification
        import pyspark.ml.clustering
        import pyspark.ml.evaluation
        import pyspark.ml.pipeline
        import pyspark.ml.recommendation
        import pyspark.ml.regression

        modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
                   pyspark.ml.evaluation, pyspark.ml.pipeline, pyspark.ml.recommendation,
                   pyspark.ml.regression]
        for module in modules:
            for name, cls in inspect.getmembers(module, inspect.isclass):
                if not name.endswith('Model') and not name.endswith('Params')\
                        and issubclass(cls, JavaParams) and not inspect.isabstract(cls):
                    # NOTE: disable check_params_exist until there is parity with Scala API
                    ParamTests.check_params(self, cls(), check_params_exist=False)

        # Additional classes that need explicit construction
        from pyspark.ml.feature import CountVectorizerModel, StringIndexerModel
        ParamTests.check_params(self, CountVectorizerModel.from_vocabulary(['a'], 'input'),
                                check_params_exist=False)
        ParamTests.check_params(self, StringIndexerModel.from_labels(['a', 'b'], 'input'),
                                check_params_exist=False) 
Developer: runawayhorse001, Project: LearningApacheSpark, Lines: 27, Source: tests.py

Example 2: _make_java_param_pair

# Required import: from pyspark import ml [as an alias]
# Or: from pyspark.ml import Model [as an alias]
def _make_java_param_pair(self, param, value):
        """
        Makes a Java param pair.
        """
        sc = SparkContext._active_spark_context
        param = self._resolveParam(param)
        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
                                             self.uid)
        java_param = _java_obj.getParam(param.name)
        if isinstance(value, JavaParams):
            # used in the case of an estimator having another estimator as a parameter
            # the reason why this is not in _py2java in common.py is that importing
            # Estimator and Model in common.py results in a circular import with inherit_doc
            java_value = value._to_java()
        else:
            java_value = _py2java(sc, value)
        return java_param.w(java_value) 
Developer: runawayhorse001, Project: LearningApacheSpark, Lines: 19, Source: classification.py
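
The comment inside Example 2 refers to the case where one estimator is itself a Param value of another; OneVsRest's classifier param is the canonical instance. A hedged sketch of that situation (construction only; fitting would need a labeled DataFrame such as the one in the sketch after the introduction):

from pyspark.ml.classification import LogisticRegression, OneVsRest

# The classifier Param holds another estimator. When OneVsRest's params are
# pushed to the JVM, _make_java_param_pair() converts such values with
# value._to_java() rather than the generic _py2java() path.
ovr = OneVsRest(classifier=LogisticRegression(maxIter=5))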

Example 3: test_idf

# Required import: from pyspark import ml [as an alias]
# Or: from pyspark.ml import Model [as an alias]
def test_idf(self):
        dataset = self.spark.createDataFrame([
            (DenseVector([1.0, 2.0]),),
            (DenseVector([0.0, 1.0]),),
            (DenseVector([3.0, 0.2]),)], ["tf"])
        idf0 = IDF(inputCol="tf")
        self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
        idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
        self.assertEqual(idf0m.uid, idf0.uid,
                         "Model should inherit the UID from its parent estimator.")
        output = idf0m.transform(dataset)
        self.assertIsNotNone(output.head().idf)
        # Test that parameters transferred to Python Model
        ParamTests.check_params(self, idf0m) 
Developer: runawayhorse001, Project: LearningApacheSpark, Lines: 16, Source: tests.py
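
Outside the test harness, the same IDF estimator can be used directly; the DataFrame below mirrors the one in Example 3, while the output column name is an assumption.

from pyspark.sql import SparkSession
from pyspark.ml.feature import IDF
from pyspark.ml.linalg import DenseVector

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(DenseVector([1.0, 2.0]),),
     (DenseVector([0.0, 1.0]),),
     (DenseVector([3.0, 0.2]),)], ["tf"])

idf_model = IDF(inputCol="tf", outputCol="idf").fit(df)   # IDFModel, a pyspark.ml.Model
idf_model.transform(df).show(truncate=False)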

Example 4: coefficients

# Required import: from pyspark import ml [as an alias]
# Or: from pyspark.ml import Model [as an alias]
def coefficients(self):
        """
        Model coefficients of Linear SVM Classifier.
        """
        return self._call_java("coefficients") 
Developer: runawayhorse001, Project: LearningApacheSpark, Lines: 7, Source: classification.py

Example 5: intercept

# Required import: from pyspark import ml [as an alias]
# Or: from pyspark.ml import Model [as an alias]
def intercept(self):
        """
        Model intercept of Linear SVM Classifier.
        """
        return self._call_java("intercept") 
Developer: runawayhorse001, Project: LearningApacheSpark, Lines: 7, Source: classification.py
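
Examples 4 and 5 are property getters on a fitted LinearSVCModel. A minimal sketch of how they are typically read, reusing the `spark` session and toy `train` DataFrame from the sketch after the introduction (an assumption made for brevity):

from pyspark.ml.classification import LinearSVC

# `train` is the toy two-row DataFrame defined in the introductory sketch.
svc_model = LinearSVC(maxIter=10).fit(train)   # LinearSVCModel
print(svc_model.coefficients)                  # DenseVector of feature weights
print(svc_model.intercept)                     # scalar intercept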

Example 6: coefficientMatrix

# Required import: from pyspark import ml [as an alias]
# Or: from pyspark.ml import Model [as an alias]
def coefficientMatrix(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficientMatrix") 
Developer: runawayhorse001, Project: LearningApacheSpark, Lines: 7, Source: classification.py

Example 7: interceptVector

# Required import: from pyspark import ml [as an alias]
# Or: from pyspark.ml import Model [as an alias]
def interceptVector(self):
        """
        Model intercept.
        """
        return self._call_java("interceptVector") 
Developer: runawayhorse001, Project: LearningApacheSpark, Lines: 7, Source: classification.py
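
Examples 6 and 7 expose the multiclass counterparts on LogisticRegressionModel: a matrix with one row of coefficients per class and a vector with one intercept per class. Another short sketch, again reusing `train` from the introductory sketch:

from pyspark.ml.classification import LogisticRegression

mlr_model = LogisticRegression(maxIter=10, family="multinomial").fit(train)
print(mlr_model.coefficientMatrix)   # one row of coefficients per class
print(mlr_model.interceptVector)     # one intercept per class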

Example 8: _validate_model

# Required import: from pyspark import ml [as an alias]
# Or: from pyspark.ml import Model [as an alias]
def _validate_model(spark_model):
    from pyspark.ml.util import MLReadable, MLWritable
    from pyspark.ml import Model as PySparkModel
    if not isinstance(spark_model, PySparkModel) \
            or not isinstance(spark_model, MLReadable) \
            or not isinstance(spark_model, MLWritable):
        raise MlflowException(
            "Cannot serialize this model. MLflow can only save descendants of pyspark.Model"
            "that implement MLWritable and MLReadable.",
            INVALID_PARAMETER_VALUE) 
Developer: mlflow, Project: mlflow, Lines: 12, Source: spark.py
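
Example 8 (from MLflow) accepts an object only if it is simultaneously a pyspark.ml.Model, MLReadable, and MLWritable. A small hypothetical helper mirroring the same check (the name is_saveable is an assumption, not MLflow API):

from pyspark.ml import Model
from pyspark.ml.util import MLReadable, MLWritable

def is_saveable(obj):
    # True only for fitted Models that can also be written to and read from
    # disk, i.e. the objects Example 8 allows through without raising.
    return (isinstance(obj, Model)
            and isinstance(obj, MLReadable)
            and isinstance(obj, MLWritable))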


Note: The pyspark.ml.Model examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and the source code copyright remains with those authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.