

Python mlflow.get_artifact_uri Method Code Examples

This article collects typical usage examples of the mlflow.get_artifact_uri method in Python. If you are unsure what mlflow.get_artifact_uri does or how to use it, the curated code examples below should help. You can also explore other usage examples from the mlflow package that provides this method.


The sections below present 11 code examples of the mlflow.get_artifact_uri method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
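
As a quick orientation before the examples: inside an active run, mlflow.get_artifact_uri() returns the run's artifact root URI, and passing an artifact_path returns the URI of that relative path under the artifact root (both behaviors are exercised by Examples 6-8 below). A minimal sketch of this behavior; the artifact path used here is purely illustrative:

import mlflow

with mlflow.start_run():
    # Root artifact URI of the active run,
    # e.g. "file:///.../mlruns/0/<run_id>/artifacts"
    root_uri = mlflow.get_artifact_uri()

    # URI of a relative path under the run's artifact root; the path
    # does not need to have been logged yet.
    model_uri = mlflow.get_artifact_uri(artifact_path="models/my_model")

    print(root_uri)
    print(model_uri)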

Example 1: test_artifact_can_be_downloaded_from_absolute_uri_successfully

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def test_artifact_can_be_downloaded_from_absolute_uri_successfully(tmpdir):
    artifact_file_name = "artifact.txt"
    artifact_text = "Sample artifact text"
    local_artifact_path = tmpdir.join(artifact_file_name).strpath
    with open(local_artifact_path, "w") as out:
        out.write(artifact_text)

    logged_artifact_path = "artifact"
    with mlflow.start_run():
        mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_path)
        artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_path)

    downloaded_artifact_path = os.path.join(
        _download_artifact_from_uri(artifact_uri), artifact_file_name)
    assert downloaded_artifact_path != local_artifact_path
    assert downloaded_artifact_path != logged_artifact_path
    with open(downloaded_artifact_path, "r") as f:
        assert f.read() == artifact_text 
Author: mlflow, Project: mlflow, Lines: 20, Source: test_artifact_utils.py

Example 2: test_download_artifact_from_absolute_uri_persists_data_to_specified_output_directory

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def test_download_artifact_from_absolute_uri_persists_data_to_specified_output_directory(tmpdir):
    artifact_file_name = "artifact.txt"
    artifact_text = "Sample artifact text"
    local_artifact_path = tmpdir.join(artifact_file_name).strpath
    with open(local_artifact_path, "w") as out:
        out.write(artifact_text)

    logged_artifact_subdir = "logged_artifact"
    with mlflow.start_run():
        mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_subdir)
        artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_subdir)

    artifact_output_path = tmpdir.join("artifact_output").strpath
    os.makedirs(artifact_output_path)
    _download_artifact_from_uri(artifact_uri=artifact_uri, output_path=artifact_output_path)
    assert logged_artifact_subdir in os.listdir(artifact_output_path)
    assert artifact_file_name in os.listdir(
        os.path.join(artifact_output_path, logged_artifact_subdir))
    with open(os.path.join(
            artifact_output_path, logged_artifact_subdir, artifact_file_name), "r") as f:
        assert f.read() == artifact_text 
Author: mlflow, Project: mlflow, Lines: 23, Source: test_artifact_utils.py

Example 3: log_saved_model

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def log_saved_model(saved_model_path: Union[bytes, str],
                    global_step: int,
                    saved_model_load_fn: Callable):
    """
    Log all the saved models to mlflow

    Parameters
    ----------
    saved_model_path
        path to saved model
    global_step
        global step for saved model
    saved_model_load_fn
        callable for loading the saved model; unused here (provided by the patch)
    """
    # pylint: disable=unused-argument
    # saved_model_load_fn is coming from patch
    if mlflow.active_run() is None:
        _warn_about_no_run()
        return
    if isinstance(saved_model_path, bytes):
        saved_model_path = saved_model_path.decode()
    saved_model_tag = os.path.split(saved_model_path)[-1]
    artifact_path = os.path.join("models", saved_model_tag)
    mlflow_tf.log_model(
        tf_saved_model_dir=saved_model_path,
        tf_meta_graph_tags=[tag_constants.SERVING],
        tf_signature_def_key=
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
        artifact_path=artifact_path)
    mlflow_artifacts_path = mlflow.get_artifact_uri()
    saved_model_artifact_path = os.path.join(
        mlflow_artifacts_path, artifact_path)
    project_utils.log_exported_model_info(
        saved_model_artifact_path, global_step) 
Author: audi, Project: nucleus7, Lines: 35, Source: mlflow_utils.py

Example 4: get_artifact_uri

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def get_artifact_uri():
        return None 
Author: Unbabel, Project: OpenKiwi, Lines: 4, Source: loggers.py

Example 5: test_log_artifact_with_dirs

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def test_log_artifact_with_dirs(tmpdir):
    # Test log artifact with a directory
    art_dir = tmpdir.mkdir("parent")
    file0 = art_dir.join("file0")
    file0.write("something")
    file1 = art_dir.join("file1")
    file1.write("something")
    sub_dir = art_dir.mkdir("child")
    with start_run():
        artifact_uri = mlflow.get_artifact_uri()
        run_artifact_dir = local_file_uri_to_path(artifact_uri)
        mlflow.log_artifact(str(art_dir))
        base = os.path.basename(str(art_dir))
        assert os.listdir(run_artifact_dir) == [base]
        assert set(os.listdir(os.path.join(run_artifact_dir, base))) == \
            {'child', 'file0', 'file1'}
        with open(os.path.join(run_artifact_dir, base, "file0")) as f:
            assert f.read() == "something"
    # Test log artifact with directory and specified parent folder
    art_dir = tmpdir.mkdir("dir")
    with start_run():
        artifact_uri = mlflow.get_artifact_uri()
        run_artifact_dir = local_file_uri_to_path(artifact_uri)
        mlflow.log_artifact(str(art_dir), "some_parent")
        assert os.listdir(run_artifact_dir) == [os.path.basename("some_parent")]
        assert os.listdir(os.path.join(run_artifact_dir, "some_parent")) == \
            [os.path.basename(str(art_dir))]
    sub_dir = art_dir.mkdir("another_dir")
    with start_run():
        artifact_uri = mlflow.get_artifact_uri()
        run_artifact_dir = local_file_uri_to_path(artifact_uri)
        mlflow.log_artifact(str(art_dir), "parent/and_child")
        assert os.listdir(os.path.join(run_artifact_dir, "parent", "and_child")) == \
            [os.path.basename(str(art_dir))]
        assert os.listdir(os.path.join(run_artifact_dir,
                                       "parent", "and_child",
                                       os.path.basename(str(art_dir)))) == \
            [os.path.basename(str(sub_dir))] 
Author: mlflow, Project: mlflow, Lines: 40, Source: test_tracking.py

Example 6: test_get_artifact_uri_with_artifact_path_unspecified_returns_artifact_root_dir

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def test_get_artifact_uri_with_artifact_path_unspecified_returns_artifact_root_dir():
    with mlflow.start_run() as active_run:
        assert mlflow.get_artifact_uri(artifact_path=None) == active_run.info.artifact_uri 
Author: mlflow, Project: mlflow, Lines: 5, Source: test_tracking.py

Example 7: test_get_artifact_uri_uses_currently_active_run_id

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def test_get_artifact_uri_uses_currently_active_run_id():
    artifact_path = "artifact"
    with mlflow.start_run() as active_run:
        assert mlflow.get_artifact_uri(artifact_path=artifact_path) == \
            tracking.artifact_utils.get_artifact_uri(
            run_id=active_run.info.run_id, artifact_path=artifact_path) 
Author: mlflow, Project: mlflow, Lines: 8, Source: test_tracking.py

Example 8: test_get_artifact_uri_appends_to_uri_path_component_correctly

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def test_get_artifact_uri_appends_to_uri_path_component_correctly(
        artifact_location, expected_uri_format):
    client = MlflowClient()
    client.create_experiment("get-artifact-uri-test", artifact_location=artifact_location)
    mlflow.set_experiment("get-artifact-uri-test")
    with mlflow.start_run():
        run_id = mlflow.active_run().info.run_id
        for artifact_path in ["path/to/artifact", "/artifact/path", "arty.txt"]:
            artifact_uri = mlflow.get_artifact_uri(artifact_path)
            assert artifact_uri == tracking.artifact_utils.get_artifact_uri(run_id, artifact_path)
            assert artifact_uri == expected_uri_format.format(
                run_id=run_id, path=artifact_path.lstrip("/")) 
Author: mlflow, Project: mlflow, Lines: 14, Source: test_tracking.py

Example 9: _mlflow_get_output_path

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def _mlflow_get_output_path():
    return mlflow.get_artifact_uri() 
Author: pytorch, Project: ignite, Lines: 4, Source: exp_tracking.py

Example 10: log

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def log(cls, artifact_path, flavor, registered_model_name=None, **kwargs):
        """
        Log model using supplied flavor module. If no run is active, this method will create a new
        active run.

        :param artifact_path: Run relative path identifying the model.
        :param flavor: Flavor module to save the model with. The module must have
                       the ``save_model`` function that will persist the model as a valid
                       MLflow model.
        :param registered_model_name: (Experimental) If given, create a model version under
                                      ``registered_model_name``, also creating a registered model if
                                      one with the given name does not exist.
        :param signature: (Experimental) :py:class:`ModelSignature` describes model input
                          and output :py:class:`Schema <mlflow.types.Schema>`. The model signature
                          can be :py:func:`inferred <infer_signature>` from datasets representing
                          valid model input (e.g. the training dataset) and valid model output
                          (e.g. model predictions generated on the training dataset), for example:

                          .. code-block:: python

                            from mlflow.models.signature import infer_signature
                            train = df.drop_column("target_label")
                            signature = infer_signature(train, model.predict(train))

        :param input_example: (Experimental) Input example provides one or several examples of
                              valid model input. The example can be used as a hint of what data to
                              feed the model. The given example will be converted to a Pandas
                              DataFrame and then serialized to json using the Pandas split-oriented
                              format. Bytes are base64-encoded.

        :param kwargs: Extra args passed to the model flavor.
        """
        with TempDir() as tmp:
            local_path = tmp.path("model")
            run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
            mlflow_model = cls(artifact_path=artifact_path, run_id=run_id)
            flavor.save_model(path=local_path, mlflow_model=mlflow_model,
                              **kwargs)
            mlflow.tracking.fluent.log_artifacts(local_path, artifact_path)
            try:
                mlflow.tracking.fluent._record_logged_model(mlflow_model)
            except MlflowException:
                # We need to swallow all mlflow exceptions to maintain backwards compatibility with
                # older tracking servers. Only print out a warning for now.
                _logger.warning(
                    "Logging model metadata to the tracking server has failed, possibly due older "
                    "server version. The model artifacts have been logged successfully under %s. "
                    "In addition to exporting model artifacts, MLflow clients 1.7.0 and above "
                    "attempt to record model metadata to the  tracking store. If logging to a "
                    "mlflow server via REST, consider  upgrading the server version to MLflow "
                    "1.7.0 or above.", mlflow.get_artifact_uri())
            if registered_model_name is not None:
                run_id = mlflow.tracking.fluent.active_run().info.run_id
                mlflow.register_model("runs:/%s/%s" % (run_id, artifact_path),
                                      registered_model_name) 
Author: mlflow, Project: mlflow, Lines: 57, Source: model.py
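
For context, Model.log is normally not called directly; flavor modules such as mlflow.sklearn invoke it through their log_model helpers. A minimal sketch of that indirect usage, assuming scikit-learn is installed; the toy data and artifact path are illustrative:

import numpy as np
import mlflow
import mlflow.sklearn
from sklearn.linear_model import LinearRegression

X = np.arange(10, dtype=float).reshape(-1, 1)
y = 2.0 * X.ravel()

with mlflow.start_run():
    model = LinearRegression().fit(X, y)
    # log_model delegates to Model.log with flavor=mlflow.sklearn
    mlflow.sklearn.log_model(model, artifact_path="model")
    # The logged model now lives under the run's artifact root
    print(mlflow.get_artifact_uri("model"))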

Example 11: main

# Required import: import mlflow [as alias]
# Or: from mlflow import get_artifact_uri [as alias]
def main(argv):
    with mlflow.start_run():
        args = parser.parse_args(argv[1:])

        # Builds, trains and evaluates a tf.estimator. Then, exports it for inference,
        # logs the exported model with MLflow, and loads the fitted model back as a PyFunc.
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data()

        # There are 13 features we are using for inference.
        feat_cols = [tf.feature_column.numeric_column(key="features", shape=(x_train.shape[1],))]
        feat_spec = {
            "features": tf.placeholder("float", name="features", shape=[None, x_train.shape[1]])}

        hidden_units = [50, 20]
        steps = args.steps

        regressor = tf.estimator.DNNRegressor(hidden_units=hidden_units, feature_columns=feat_cols)
        train_input_fn = tf.estimator.inputs.numpy_input_fn({"features": x_train}, y_train,
                                                            num_epochs=None, shuffle=True)
        regressor.train(train_input_fn, steps=steps)
        test_input_fn = tf.estimator.inputs.numpy_input_fn({"features": x_test}, y_test,
                                                           num_epochs=None, shuffle=True)
        # Compute mean squared error
        mse = regressor.evaluate(test_input_fn, steps=steps)

        # Building a receiver function for exporting
        receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feat_spec)
        temp = tempfile.mkdtemp()
        try:
            # The model is automatically logged when export_saved_model() is called.
            saved_estimator_path = regressor.export_savedmodel(temp, receiver_fn).decode("utf-8")

            # Since the model was automatically logged as an artifact (more specifically
            # a MLflow Model), we don't need to use saved_estimator_path to load back the model.
            # MLflow takes care of it!
            pyfunc_model = pyfunc.load_model(mlflow.get_artifact_uri('model'))
            df = pd.DataFrame(data=x_test, columns=["features"] * x_train.shape[1])

            # Checking the PyFunc's predictions are the same as the original model's predictions.
            predict_df = pyfunc_model.predict(df)
            predict_df['original_labels'] = y_test
            print(predict_df)
        finally:
            shutil.rmtree(temp) 
Author: mlflow, Project: mlflow, Lines: 46, Source: train_predict.py


Note: The mlflow.get_artifact_uri examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to the corresponding projects' licenses. Do not reproduce without permission.