

Python shap.TreeExplainer Code Examples

This article collects typical usage examples of shap.TreeExplainer in Python. If you are wondering what shap.TreeExplainer does and how it is used in practice, the curated examples below should help. You can also explore further usage examples from the shap package itself.


The following presents 8 code examples of shap.TreeExplainer, ordered by popularity by default.
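Before the examples, here is a minimal, self-contained sketch of the typical shap.TreeExplainer workflow. The dataset and model are illustrative choices, not taken from any of the examples below:

import shap
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Load a small tabular dataset bundled with shap
X, y = shap.datasets.adult()
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Fit any supported tree ensemble (sklearn, XGBoost, LightGBM, CatBoost, ...)
model = RandomForestClassifier(n_estimators=50, random_state=0).fit(x_train, y_train)

# Build the explainer from the fitted model and compute per-row SHAP values
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(x_test)

# expected_value is the base value that each row's SHAP values are measured against
print(explainer.expected_value)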

Example 1: get_explainer

# Required import: import shap [as alias]
# Or: from shap import TreeExplainer [as alias]
def get_explainer(algorithm, X_train):

        explainer = None
        if algorithm.algorithm_short_name in [
            "Xgboost",
            "Decision Tree",
            "Random Forest",
            "LightGBM",
            "Extra Trees",
            "CatBoost",
        ]:
            explainer = shap.TreeExplainer(algorithm.model)
        if algorithm.algorithm_short_name in ["Linear"]:
            explainer = shap.LinearExplainer(algorithm.model, X_train)

        return explainer 
Developer: mljar, Project: mljar-supervised, Lines: 18, Source file: shap.py
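A hedged sketch of how this dispatcher might be called. AlgorithmStub below is a hypothetical stand-in for mljar-supervised's algorithm wrapper, of which get_explainer only uses the algorithm_short_name and model attributes:

import shap
from collections import namedtuple
from sklearn.ensemble import RandomForestClassifier

# Hypothetical stand-in; only the two attributes read by get_explainer are modelled
AlgorithmStub = namedtuple("AlgorithmStub", ["algorithm_short_name", "model"])

X, y = shap.datasets.adult()
rf = RandomForestClassifier(n_estimators=30, random_state=0).fit(X, y)

algorithm = AlgorithmStub(algorithm_short_name="Random Forest", model=rf)
explainer = get_explainer(algorithm, X)  # dispatches to shap.TreeExplainer
shap_values = explainer.shap_values(X.iloc[:50])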

Example 2: explain_local

# Required import: import shap [as alias]
# Or: from shap import TreeExplainer [as alias]
def explain_local(self, evaluation_examples, probabilities=None, **kwargs):
        """Use TreeExplainer to get the local feature importances from the trained explainable model.

        :param evaluation_examples: The evaluation examples to compute local feature importances for.
        :type evaluation_examples: numpy or scipy array
        :param probabilities: If output_type is probability, can specify the teacher model's
            probability for scaling the shap values.
        :type probabilities: numpy.ndarray
        :return: The local explanation of feature importances.
        :rtype: Union[list, numpy.ndarray]
        """
        if self._tree_explainer is None:
            self._tree_explainer = shap.TreeExplainer(self._tree)
        evaluation_examples = _get_dense_examples(evaluation_examples)
        return _explain_local_tree_surrogate(self._tree, evaluation_examples, self._tree_explainer,
                                             self._shap_values_output, self._classification,
                                             probabilities, self.multiclass) 
Developer: interpretml, Project: interpret-community, Lines: 19, Source file: tree_model.py

Example 3: explain_local

# Required import: import shap [as alias]
# Or: from shap import TreeExplainer [as alias]
def explain_local(self, evaluation_examples, probabilities=None, **kwargs):
        """Use TreeExplainer to get the local feature importances from the trained explainable model.

        :param evaluation_examples: The evaluation examples to compute local feature importances for.
        :type evaluation_examples: numpy or scipy array
        :param probabilities: If output_type is probability, can specify the teacher model's
            probability for scaling the shap values.
        :type probabilities: numpy.ndarray
        :return: The local explanation of feature importances.
        :rtype: Union[list, numpy.ndarray]
        """
        if self._tree_explainer is None:
            self._tree_explainer = shap.TreeExplainer(self._lgbm)
        return _explain_local_tree_surrogate(self._lgbm, evaluation_examples, self._tree_explainer,
                                             self._shap_values_output, self._classification,
                                             probabilities, self.multiclass) 
Developer: interpretml, Project: interpret-community, Lines: 18, Source file: lightgbm_model.py
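_explain_local_tree_surrogate is internal to interpret-community, but the core of both explain_local variants above is the lazily created shap.TreeExplainer plus a shap_values call. A rough, stand-alone sketch of that pattern (the class name, model, and data are illustrative, not the library's API):

import shap
from lightgbm import LGBMClassifier

class LazyTreeSurrogate:
    def __init__(self, model):
        self._model = model
        self._tree_explainer = None

    def explain_local(self, evaluation_examples):
        # Create the shap.TreeExplainer only on first use, then cache it
        if self._tree_explainer is None:
            self._tree_explainer = shap.TreeExplainer(self._model)
        # Per-row, per-feature SHAP values for the evaluation examples
        return self._tree_explainer.shap_values(evaluation_examples)

X, y = shap.datasets.adult()
surrogate = LGBMClassifier(n_estimators=50, random_state=0).fit(X, y)
local_values = LazyTreeSurrogate(surrogate).explain_local(X.iloc[:10])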

Example 4: __init__

# Required import: import shap [as alias]
# Or: from shap import TreeExplainer [as alias]
def __init__(self, *argv, **kwargs):
        """
        Initialize the shap TreeExplainer wrapper object.
        """
        super(TreeExplainer, self).__init__(*argv, **kwargs)

        self.explainer = shap.TreeExplainer(*argv, **kwargs) 
Developer: IBM, Project: AIX360, Lines: 9, Source file: shap_wrapper.py
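The AIX360 class above is a thin pass-through around shap.TreeExplainer. Below is an illustrative wrapper in the same spirit, not the actual AIX360 class; the explain_instance method name is an assumption made for the sketch:

import shap
from sklearn.datasets import load_diabetes
from sklearn.ensemble import GradientBoostingRegressor

class TreeExplainerWrapper:
    def __init__(self, *argv, **kwargs):
        # Forward all arguments straight to shap.TreeExplainer
        self.explainer = shap.TreeExplainer(*argv, **kwargs)

    def explain_instance(self, X):
        # Hypothetical convenience method; delegates to shap
        return self.explainer.shap_values(X)

X, y = load_diabetes(return_X_y=True)
model = GradientBoostingRegressor(n_estimators=50, random_state=0).fit(X, y)
wrapper = TreeExplainerWrapper(model)
print(wrapper.explain_instance(X[:5]).shape)  # (5, n_features)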

Example 5: __init__

# Required import: import shap [as alias]
# Or: from shap import TreeExplainer [as alias]
def __init__(self, multiclass=False, random_state=DEFAULT_RANDOM_STATE,
                 shap_values_output=ShapValuesOutput.DEFAULT, classification=True, **kwargs):
        """Initialize the DecisionTreeExplainableModel.

        :param multiclass: Set to true to generate a multiclass model.
        :type multiclass: bool
        :param random_state: Int to seed the model.
        :type random_state: int
        :param shap_values_output: The type of the output from explain_local when using TreeExplainer.
            Currently only types 'default', 'probability' and 'teacher_probability' are supported.  If
            'probability' is specified, then we approximately scale the raw log-odds values from the
            TreeExplainer to probabilities.
        :type shap_values_output: interpret_community.common.constants.ShapValuesOutput
        :param classification: Indicates if this is a classification or regression explanation.
        :type classification: bool
        """
        self.multiclass = multiclass
        self.random_state = random_state
        kwargs['random_state'] = random_state
        if self.multiclass:
            initializer = DecisionTreeClassifier
        else:
            initializer = DecisionTreeRegressor
        initializer_args = _get_initializer_args(kwargs)
        self._tree = initializer(**initializer_args)
        super(DecisionTreeExplainableModel, self).__init__(**kwargs)
        self._logger.debug('Initializing DecisionTreeExplainableModel')
        self._method = 'tree'
        self._tree_explainer = None
        self._shap_values_output = shap_values_output
        self._classification = classification 
Developer: interpretml, Project: interpret-community, Lines: 33, Source file: tree_model.py

Example 6: expected_values

# Required import: import shap [as alias]
# Or: from shap import TreeExplainer [as alias]
def expected_values(self):
        """Use TreeExplainer to get the expected values.

        :return: The expected values of the decision tree model.
        :rtype: list
        """
        if self._tree_explainer is None:
            self._tree_explainer = shap.TreeExplainer(self._tree)
        return _expected_values_tree_surrogate(self._tree, self._tree_explainer, self._shap_values_output,
                                               self._classification, self.multiclass) 
Developer: interpretml, Project: interpret-community, Lines: 12, Source file: tree_model.py
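The expected value itself comes straight from the shap explainer; _expected_values_tree_surrogate only post-processes it. A minimal sketch outside interpret-community, using a plain sklearn decision tree (illustrative model and data):

import shap
from sklearn.datasets import load_diabetes
from sklearn.tree import DecisionTreeRegressor

X, y = load_diabetes(return_X_y=True)
tree = DecisionTreeRegressor(max_depth=4, random_state=0).fit(X, y)

explainer = shap.TreeExplainer(tree)
# The base value the SHAP values are measured against; for a regression tree
# this is roughly the mean prediction over the training data
print(explainer.expected_value)

# Additivity: a row's SHAP values plus expected_value reconstruct its prediction
shap_values = explainer.shap_values(X[:3])
print(shap_values.sum(axis=1) + explainer.expected_value)
print(tree.predict(X[:3]))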

Example 7: __init__

# Required import: import shap [as alias]
# Or: from shap import TreeExplainer [as alias]
def __init__(self, multiclass=False, random_state=DEFAULT_RANDOM_STATE,
                 shap_values_output=ShapValuesOutput.DEFAULT, classification=True, **kwargs):
        """Initialize the LightGBM Model.

        Additional arguments to LightGBMClassifier and LightGBMRegressor can be passed through kwargs.

        :param multiclass: Set to true to generate a multiclass model.
        :type multiclass: bool
        :param random_state: Int to seed the model.
        :type random_state: int
        :param shap_values_output: The type of the output from explain_local when using TreeExplainer.
            Currently only types 'default', 'probability' and 'teacher_probability' are supported.  If
            'probability' is specified, then we approximately scale the raw log-odds values from the
            TreeExplainer to probabilities.
        :type shap_values_output: interpret_community.common.constants.ShapValuesOutput
        :param classification: Indicates if this is a classification or regression explanation.
        :type classification: bool
        """
        self.multiclass = multiclass
        initializer_args = _get_initializer_args(kwargs)
        if self.multiclass:
            initializer = LGBMClassifier
        else:
            initializer = LGBMRegressor
        self._lgbm = initializer(random_state=random_state, **initializer_args)
        super(LGBMExplainableModel, self).__init__(**kwargs)
        self._logger.debug('Initializing LGBMExplainableModel')
        self._method = 'lightgbm'
        self._tree_explainer = None
        self._shap_values_output = shap_values_output
        self._classification = classification 
Developer: interpretml, Project: interpret-community, Lines: 33, Source file: lightgbm_model.py
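As in the decision-tree variant, the constructor only decides which LightGBM estimator becomes the surrogate; once fitted, either one can be handed to shap.TreeExplainer. A brief, hedged sketch of the two flavours (datasets are illustrative):

import shap
from lightgbm import LGBMClassifier, LGBMRegressor
from sklearn.datasets import load_diabetes, load_iris

# Multiclass surrogate
X_cls, y_cls = load_iris(return_X_y=True)
clf = LGBMClassifier(n_estimators=50, random_state=7).fit(X_cls, y_cls)
cls_shap = shap.TreeExplainer(clf).shap_values(X_cls[:5])

# Regression surrogate
X_reg, y_reg = load_diabetes(return_X_y=True)
reg = LGBMRegressor(n_estimators=50, random_state=7).fit(X_reg, y_reg)
reg_shap = shap.TreeExplainer(reg).shap_values(X_reg[:5])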

Example 8: test_validate_against_shap

# Required import: import shap [as alias]
# Or: from shap import TreeExplainer [as alias]
def test_validate_against_shap(self):
        # Validate our explainer against shap library directly
        X, y = shap.datasets.adult()
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.02, random_state=7)
        # Fit several classifiers
        tree_classifiers = [create_sklearn_random_forest_classifier(x_train, y_train)]
        non_tree_classifiers = [create_sklearn_logistic_regressor(x_train, y_train)]
        tree_regressors = [create_sklearn_random_forest_regressor(x_train, y_train)]
        non_tree_regressors = [create_sklearn_linear_regressor(x_train, y_train)]
        # For each model, validate we get the same results as calling shap directly
        test_logger.info("Running tree classifiers in test_validate_against_shap")
        for model in tree_classifiers:
            # Run shap directly for comparison
            exp = shap.TreeExplainer(model)
            explanation = exp.shap_values(x_test)
            shap_overall_imp = get_shap_imp_classification(explanation)
            overall_imp = tabular_explainer_imp(model, x_train, x_test)
            validate_correlation(overall_imp, shap_overall_imp, 0.95)

        test_logger.info("Running non tree classifiers in test_validate_against_shap")
        for model in non_tree_classifiers:
            # Run shap directly for comparison
            clustered = shap.kmeans(x_train, 10)
            exp = shap.KernelExplainer(model.predict_proba, clustered)
            explanation = exp.shap_values(x_test)
            shap_overall_imp = get_shap_imp_classification(explanation)
            overall_imp = tabular_explainer_imp(model, x_train, x_test)
            validate_correlation(overall_imp, shap_overall_imp, 0.95)

        test_logger.info("Running tree regressors in test_validate_against_shap")
        for model in tree_regressors:
            # Run shap directly for comparison
            exp = shap.TreeExplainer(model)
            explanation = exp.shap_values(x_test)
            shap_overall_imp = get_shap_imp_regression(explanation)
            overall_imp = tabular_explainer_imp(model, x_train, x_test)
            validate_correlation(overall_imp, shap_overall_imp, 0.95)

        test_logger.info("Running non tree regressors in test_validate_against_shap")
        for model in non_tree_regressors:
            # Run shap directly for comparison
            clustered = shap.kmeans(x_train, 10)
            exp = shap.KernelExplainer(model.predict, clustered)
            explanation = exp.shap_values(x_test)
            shap_overall_imp = get_shap_imp_regression(explanation)
            overall_imp = tabular_explainer_imp(model, x_train, x_test)
            validate_correlation(overall_imp, shap_overall_imp, 0.95) 
Developer: interpretml, Project: interpret-community, Lines: 49, Source file: test_validate_explanations.py
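The helpers used here (get_shap_imp_classification/regression, tabular_explainer_imp, validate_correlation) are test utilities internal to interpret-community. The quantity they compare is a global importance ranking derived from local SHAP values, commonly the mean absolute SHAP value per feature. A hedged, self-contained sketch of that aggregation, using the regression case to keep the output shape simple:

import numpy as np
import shap
from sklearn.datasets import load_diabetes
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

X, y = load_diabetes(return_X_y=True, as_frame=True)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=7)

model = RandomForestRegressor(n_estimators=50, random_state=7).fit(x_train, y_train)
shap_values = shap.TreeExplainer(model).shap_values(x_test)  # (n_rows, n_features)

# Collapse local values into one global importance score per feature
global_importance = np.abs(shap_values).mean(axis=0)
ranking = np.argsort(global_importance)[::-1]
print([x_test.columns[i] for i in ranking[:5]])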


Note: The shap.TreeExplainer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not repost without permission.