

Python mlflow.set_tracking_uri Method Code Examples

This article collects typical usage examples of the Python mlflow.set_tracking_uri method. If you are wondering how mlflow.set_tracking_uri is used in practice, the curated examples below should help. You can also explore further usage examples from the mlflow module.


The following presents 15 code examples of the mlflow.set_tracking_uri method, drawn from open-source projects and sorted by popularity by default.
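Before turning to the examples, here is a minimal, self-contained sketch of the pattern most of them follow: point the tracking API at a backend with mlflow.set_tracking_uri, select an experiment, and log inside a run. The local file URI, experiment name, and parameter/metric names below are illustrative assumptions, not taken from any of the projects listed here.

import mlflow

# Point MLflow at a local file-based store (illustrative path); an HTTP(S)
# tracking server URI or a Databricks URI can be passed in exactly the same way.
mlflow.set_tracking_uri("file:./mlruns")

# Select (or create) an experiment and log inside a run.
mlflow.set_experiment("demo-experiment")
with mlflow.start_run():
    mlflow.log_param("learning_rate", 0.01)
    mlflow.log_metric("accuracy", 0.93)

# The currently configured URI can be read back at any time.
print(mlflow.get_tracking_uri())

Several examples below also call mlflow.set_tracking_uri(None); this clears any URI set in the current process, so MLflow falls back to the MLFLOW_TRACKING_URI environment variable or, if that is unset, a local ./mlruns directory.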

Example 1: create_new_or_continue_experiment

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def create_new_or_continue_experiment(project_dir: str):
    """
    Creates a new experiment or continues an already existing one.

    The experiment name is the name of the project_dir.

    Parameters
    ----------
    project_dir
        project directory
    """
    mlflow.set_tracking_uri(None)
    experiment_name = project_utils.get_project_name_from_directory(project_dir)
    if "MLFLOW_TRACKING_URI" not in os.environ:
        tracking_uri = os.path.join(os.path.split(project_dir)[0], "mlruns")
        tracking_uri = os.path.realpath(tracking_uri)
        mlflow.set_tracking_uri(tracking_uri)
    mlflow.set_experiment(experiment_name) 
Author: audi, Project: nucleus7, Lines: 20, Source: mlflow_utils.py

Example 2: test_mlflow_context_log_metadata

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def test_mlflow_context_log_metadata(MockClient, tmpdir, metadata):
    """
    Test that a call to the wrapped function initiates MLflow logging or issues a warning
    """
    metadata = Machine(**metadata)
    mlflow.set_tracking_uri(f"file:{tmpdir}")

    mock_client = MockClient()
    mock_client.log_batch.return_value = "test"

    # Function with a metadata dict returned
    with mlu.mlflow_context("returns metadata", "unique_key", {}, {}) as (
        mlflow_client,
        run_id,
    ):
        mlu.log_machine(mlflow_client, run_id, metadata)

    assert mock_client.log_batch.called 
Author: equinor, Project: gordo, Lines: 20, Source: test_mlflow_reporter.py

Example 3: test_docker_project_tracking_uri_propagation

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def test_docker_project_tracking_uri_propagation(
        ProfileConfigProvider, tmpdir, tracking_uri,
        expected_command_segment, docker_example_base_image):  # pylint: disable=unused-argument
    mock_provider = mock.MagicMock()
    mock_provider.get_config.return_value = \
        DatabricksConfig("host", "user", "pass", None, insecure=True)
    ProfileConfigProvider.return_value = mock_provider
    # Create and mock local tracking directory
    local_tracking_dir = os.path.join(tmpdir.strpath, "mlruns")
    if tracking_uri is None:
        tracking_uri = local_tracking_dir
    old_uri = mlflow.get_tracking_uri()
    try:
        mlflow.set_tracking_uri(tracking_uri)
        with mock.patch("mlflow.tracking._tracking_service.utils._get_store") as _get_store_mock:
            _get_store_mock.return_value = file_store.FileStore(local_tracking_dir)
            mlflow.projects.run(
                TEST_DOCKER_PROJECT_DIR, experiment_id=file_store.FileStore.DEFAULT_EXPERIMENT_ID)
    finally:
        mlflow.set_tracking_uri(old_uri) 
Author: mlflow, Project: mlflow, Lines: 22, Source: test_docker_projects.py

Example 4: test_model_log

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def test_model_log(h2o_iris_model):
    h2o_model = h2o_iris_model.model
    old_uri = mlflow.get_tracking_uri()
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        with TempDir(chdr=True, remove_on_exit=True):
            try:
                artifact_path = "gbm_model"
                mlflow.set_tracking_uri("test")
                if should_start_run:
                    mlflow.start_run()
                mlflow.h2o.log_model(h2o_model=h2o_model, artifact_path=artifact_path)
                model_uri = "runs:/{run_id}/{artifact_path}".format(
                    run_id=mlflow.active_run().info.run_id,
                    artifact_path=artifact_path)

                # Load model
                h2o_model_loaded = mlflow.h2o.load_model(model_uri=model_uri)
                assert all(
                    h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() ==
                    h2o_model.predict(h2o_iris_model.inference_data).as_data_frame())
            finally:
                mlflow.end_run()
                mlflow.set_tracking_uri(old_uri) 
Author: mlflow, Project: mlflow, Lines: 26, Source: test_h2o_model_export.py

Example 5: test_upload_as_model

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def test_upload_as_model(self, iris, tabular_explainer, tracking_uri):
        mlflow.set_tracking_uri(tracking_uri)
        x_train = iris[DatasetConstants.X_TRAIN]
        x_test = iris[DatasetConstants.X_TEST]
        y_train = iris[DatasetConstants.Y_TRAIN]

        model = create_sklearn_random_forest_classifier(x_train, y_train)

        explainer = tabular_explainer(model, x_train)
        global_explanation = explainer.explain_global(x_test)
        mlflow.set_experiment(TEST_EXPERIMENT)
        with mlflow.start_run() as run:
            log_explanation(TEST_EXPLANATION, global_explanation)
            os.makedirs(TEST_DOWNLOAD, exist_ok=True)
            run_id = run.info.run_id
        downloaded_explanation_mlflow = get_explanation(run_id, TEST_EXPLANATION)
        _assert_explanation_equivalence(global_explanation, downloaded_explanation_mlflow) 
Author: interpretml, Project: interpret-community, Lines: 19, Source: test_mlflow.py

Example 6: test_upload_two_explanations

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def test_upload_two_explanations(self, iris, tabular_explainer, tracking_uri):
        mlflow.set_tracking_uri(tracking_uri)
        x_train = iris[DatasetConstants.X_TRAIN]
        x_test = iris[DatasetConstants.X_TEST]
        y_train = iris[DatasetConstants.Y_TRAIN]

        model = create_sklearn_random_forest_classifier(x_train, y_train)

        explainer = tabular_explainer(model, x_train)
        global_explanation = explainer.explain_global(x_test)
        local_explanation = explainer.explain_local(x_test)
        mlflow.set_experiment(TEST_EXPERIMENT)
        with mlflow.start_run() as run:
            log_explanation('global_explanation', global_explanation)
            log_explanation('local_explanation', local_explanation)
            os.makedirs(TEST_DOWNLOAD, exist_ok=True)
            run_id = run.info.run_id
        downloaded_explanation_mlflow = get_explanation(run_id, 'global_explanation')
        _assert_explanation_equivalence(global_explanation, downloaded_explanation_mlflow) 
Author: interpretml, Project: interpret-community, Lines: 21, Source: test_mlflow.py

Example 7: init_experiment

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def init_experiment(self, experiment_name, run_name=None, nested=True):
        try:
            mlflow.set_tracking_uri(self.tracking_uri)
            mlflow.set_experiment(experiment_name)
            mlflow.start_run(run_name=run_name, nested=nested)
        except ConnectionError:
            raise Exception(
                f"MLFlow cannot connect to the remote server at {self.tracking_uri}.\n"
                f"MLFlow also supports logging runs locally to files. Set the MLFlowLogger "
                f"tracking_uri to an empty string to use that."
            ) 
Author: deepset-ai, Project: FARM, Lines: 13, Source: utils.py

Example 8: setUp

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def setUp(self):
        TestCaseWithReset.setUp(self)
        TestCaseWithTempDir.setUp(self)
        if "MLFLOW_TRACKING_URI" in os.environ:
            del os.environ["MLFLOW_TRACKING_URI"]
        mlflow.set_tracking_uri(None) 
Author: audi, Project: nucleus7, Lines: 8, Source: mlflow_utils_test.py

Example 9: configure

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def configure(
        self,
        run_uuid,
        experiment_name,
        tracking_uri,
        run_name=None,
        always_log_artifacts=False,
        create_run=True,
        create_experiment=True,
        nest_run=True,
    ):
        if mlflow.active_run() and not nest_run:
            logger.info('Ending previous MLFlow run: {}.'.format(self.run_uuid))
            mlflow.end_run()

        self.always_log_artifacts = always_log_artifacts
        self._experiment_name = experiment_name
        self._run_name = run_name

        # MLflow specific
        if tracking_uri:
            mlflow.set_tracking_uri(tracking_uri)

        if run_uuid:
            existing_run = MlflowClient().get_run(run_uuid)
            if not existing_run and not create_run:
                raise FileNotFoundError(
                    'Run ID {} not found under {}'.format(
                        run_uuid, mlflow.get_tracking_uri()
                    )
                )

        experiment_id = self._retrieve_mlflow_experiment_id(
            experiment_name, create=create_experiment
        )
        return mlflow.start_run(
            run_uuid,
            experiment_id=experiment_id,
            run_name=run_name,
            nested=nest_run,
        ) 
Author: Unbabel, Project: OpenKiwi, Lines: 43, Source: loggers.py

Example 10: runner

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def runner(tmpdir):
    mlflow.set_tracking_uri(f"file:{tmpdir}")
    yield CliRunner() 
Author: equinor, Project: gordo, Lines: 5, Source: test_cli.py

Example 11: mlflow_context

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
@contextmanager  # from contextlib; the function yields and is entered via `with` (see Example 2)
def mlflow_context(
    name: str,
    model_key: str = uuid4().hex,
    workspace_kwargs: dict = {},
    service_principal_kwargs: dict = {},
):
    """
    Generate MLflow logger function with either a local or AzureML backend

    Parameters
    ----------
    name: str
        The name of the log group to log to (e.g. a model name).
    model_key: str
        Unique ID of logging run.
    workspace_kwargs: dict
        AzureML Workspace configuration to use for remote MLFlow tracking. See
        :func:`gordo.builder.mlflow_utils.get_mlflow_client`.
    service_principal_kwargs: dict
        AzureML ServicePrincipalAuthentication keyword arguments. See
        :func:`gordo.builder.mlflow_utils.get_mlflow_client`

    Example
    -------
    >>> with tempfile.TemporaryDirectory() as tmp_dir:
    ...     mlflow.set_tracking_uri(f"file:{tmp_dir}")
    ...     with mlflow_context("log_group", "unique_key", {}, {}) as (mlflow_client, run_id):
    ...         log_machine(mlflow_client, run_id, machine) # doctest: +SKIP
    """
    mlflow_client = get_mlflow_client(workspace_kwargs, service_principal_kwargs)
    run_id = get_run_id(mlflow_client, experiment_name=name, model_key=model_key)

    logger.info(
        f"MLflow client configured to use {'AzureML' if workspace_kwargs else 'local backend'}"
    )

    yield mlflow_client, run_id

    mlflow_client.set_terminated(run_id) 
Author: equinor, Project: gordo, Lines: 41, Source: mlflow.py

Example 12: start

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def start(self):
        """Start the whole thing"""

        self._setup_logging()

        if self.generate_config:
            self.write_config()

        #
        # Setup mlflow
        #
        import mlflow
        mlflow.set_tracking_uri(self.mlflow_server)
        experiment_id = mlflow.set_experiment(self.name)

        #
        # Run the script under mlflow
        #
        with mlflow.start_run(experiment_id=experiment_id):
            #
            # Log the run parameters to mlflow.
            #
            mlflow.log_param("results_path", self.results_path)

            cls = self.__class__
            for k, trait in sorted(cls.class_own_traits(config=True).items()):
                mlflow.log_param(trait.name, repr(trait.get(self)))

            self.run() 
Author: leokarlin, Project: LaSO, Lines: 31, Source: experiment.py

Example 13: tracking_uri_mock

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def tracking_uri_mock(tmpdir, request):
    try:
        if 'notrackingurimock' not in request.keywords:
            tracking_uri = path_to_local_sqlite_uri(
                os.path.join(tmpdir.strpath, 'mlruns'))
            mlflow.set_tracking_uri(tracking_uri)
            os.environ["MLFLOW_TRACKING_URI"] = tracking_uri
        yield tmpdir
    finally:
        mlflow.set_tracking_uri(None)
        if 'notrackingurimock' not in request.keywords:
            del os.environ["MLFLOW_TRACKING_URI"] 
Author: mlflow, Project: mlflow, Lines: 14, Source: conftest.py

Example 14: test_get_tracking_uri_for_run

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def test_get_tracking_uri_for_run():
    mlflow.set_tracking_uri("http://some-uri")
    assert databricks._get_tracking_uri_for_run() == "http://some-uri"
    mlflow.set_tracking_uri("databricks://profile")
    assert databricks._get_tracking_uri_for_run() == "databricks"
    mlflow.set_tracking_uri(None)
    with mock.patch.dict(os.environ, {mlflow.tracking._TRACKING_URI_ENV_VAR: "http://some-uri"}):
        assert mlflow.tracking._tracking_service.utils.get_tracking_uri() == "http://some-uri" 
Author: mlflow, Project: mlflow, Lines: 10, Source: test_databricks.py

Example 15: test_sparkml_model_log

# Required import: import mlflow [as alias]
# Or: from mlflow import set_tracking_uri [as alias]
def test_sparkml_model_log(tmpdir, spark_model_iris):
    # Print the coefficients and intercept for multinomial logistic regression
    old_tracking_uri = mlflow.get_tracking_uri()
    cnt = 0
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        for dfs_tmp_dir in [None, os.path.join(str(tmpdir), "test")]:
            print("should_start_run =", should_start_run, "dfs_tmp_dir =", dfs_tmp_dir)
            try:
                tracking_dir = os.path.abspath(str(tmpdir.join("mlruns")))
                mlflow.set_tracking_uri("file://%s" % tracking_dir)
                if should_start_run:
                    mlflow.start_run()
                artifact_path = "model%d" % cnt
                cnt += 1
                sparkm.log_model(artifact_path=artifact_path, spark_model=spark_model_iris.model,
                                 dfs_tmpdir=dfs_tmp_dir)
                model_uri = "runs:/{run_id}/{artifact_path}".format(
                    run_id=mlflow.active_run().info.run_id,
                    artifact_path=artifact_path)

                # test reloaded model
                reloaded_model = sparkm.load_model(model_uri=model_uri, dfs_tmpdir=dfs_tmp_dir)
                preds_df = reloaded_model.transform(spark_model_iris.spark_df)
                preds = [x.prediction for x in preds_df.select("prediction").collect()]
                assert spark_model_iris.predictions == preds
            finally:
                mlflow.end_run()
                mlflow.set_tracking_uri(old_tracking_uri)
                x = dfs_tmp_dir or sparkm.DFS_TMP
                shutil.rmtree(x)
                shutil.rmtree(tracking_dir) 
Author: mlflow, Project: mlflow, Lines: 34, Source: test_spark_model_export.py


Note: The mlflow.set_tracking_uri examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.