This article collects typical usage examples of the mlflow.end_run method in Python. If you have been wondering what exactly mlflow.end_run does, or how to call it in practice, the curated examples below should help; they also show how the surrounding mlflow module is used.
The following 15 code examples of mlflow.end_run are presented, sorted by popularity by default.
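Before the examples, here is a minimal sketch (not taken from the examples below; the parameter and metric names are placeholders) of how mlflow.end_run is typically paired with mlflow.start_run:

import mlflow

mlflow.start_run()                   # open a tracking run
mlflow.log_param('max_depth', 8)     # record a hyperparameter
mlflow.log_metric('accuracy', 0.9)   # record a result
mlflow.end_run()                     # close the run; mlflow.active_run() is now None

# Equivalent form: the context manager calls end_run() automatically on exit.
with mlflow.start_run():
    mlflow.log_metric('accuracy', 0.9)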
Example 1: stop
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def stop(self):
    """
    Stop current experiment.
    """
    self._save_dict(self.metrics, 'metrics.json')
    self._save_dict(self.params, 'params.json')

    if not self.is_custom:
        for h in self.logger.handlers:
            h.close()

    if self.with_mlflow:
        import mlflow
        from mlflow.exceptions import MlflowException

        try:
            mlflow.log_artifact(self.log_path)
            mlflow.log_artifact(os.path.join(self.logging_directory, 'metrics.json'))
            mlflow.log_artifact(os.path.join(self.logging_directory, 'params.json'))
        except MlflowException as e:
            warnings.warn('Error in saving artifacts to mlflow. The result may not be saved: {}'.format(e))

        if not self.inherit_existing_run:
            mlflow.end_run()
Example 2: test_inherit_outer_scope_run
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_inherit_outer_scope_run(tmpdir_name):
    mlflow.start_run()
    mlflow.log_param('foo', 1)

    params = {
        'objective': 'binary',
        'max_depth': 8
    }
    X, y = make_classification_df()

    run_experiment(params, X, y, with_mlflow=True, logging_directory=tmpdir_name)

    assert mlflow.active_run() is not None  # still valid

    client = mlflow.tracking.MlflowClient()
    data = client.get_run(mlflow.active_run().info.run_id).data
    assert data.metrics['Overall'] > 0  # recorded

    mlflow.end_run()
Example 3: test_ignore_errors_in_mlflow_params
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_ignore_errors_in_mlflow_params(tmpdir_name):
    mlflow.start_run()
    mlflow.log_param('features', 'ABC')
    mlflow.log_metric('Overall', -99)

    params = {
        'objective': 'binary',
        'max_depth': 8
    }
    X, y = make_classification_df()

    result = run_experiment(params, X, y, with_mlflow=True, logging_directory=tmpdir_name, feature_list=[])

    client = mlflow.tracking.MlflowClient()
    data = client.get_run(mlflow.active_run().info.run_id).data

    assert data.metrics['Overall'] == result.metrics[-1]
    assert data.params['features'] == 'ABC'  # params cannot be overwritten

    mlflow.end_run()
Example 4: test_model_log
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_model_log(model, data, predicted):
    x, _ = data
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        try:
            if should_start_run:
                mlflow.start_run()
            artifact_path = "keras_model"
            mlflow.keras.log_model(model, artifact_path=artifact_path)
            model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id,
                artifact_path=artifact_path)

            # Load model
            model_loaded = mlflow.keras.load_model(model_uri=model_uri)
            assert all(model_loaded.predict(x) == predicted)

            # Load pyfunc model
            pyfunc_loaded = mlflow.pyfunc.load_model(model_uri=model_uri)
            assert all(pyfunc_loaded.predict(x).values == predicted)
        finally:
            mlflow.end_run()
Example 5: test_delete_tag
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_delete_tag():
    """
    Confirm that the fluent API delete_tag actually works.
    :return:
    """
    mlflow.set_tag('a', 'b')
    run = MlflowClient().get_run(mlflow.active_run().info.run_id)
    print(run.info.run_id)
    assert 'a' in run.data.tags
    mlflow.delete_tag('a')
    run = MlflowClient().get_run(mlflow.active_run().info.run_id)
    assert 'a' not in run.data.tags
    with pytest.raises(MlflowException):
        mlflow.delete_tag('a')
    with pytest.raises(MlflowException):
        mlflow.delete_tag('b')
    mlflow.end_run()
Example 6: test_model_log
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_model_log(h2o_iris_model):
    h2o_model = h2o_iris_model.model
    old_uri = mlflow.get_tracking_uri()
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        with TempDir(chdr=True, remove_on_exit=True):
            try:
                artifact_path = "gbm_model"
                mlflow.set_tracking_uri("test")
                if should_start_run:
                    mlflow.start_run()
                mlflow.h2o.log_model(h2o_model=h2o_model, artifact_path=artifact_path)
                model_uri = "runs:/{run_id}/{artifact_path}".format(
                    run_id=mlflow.active_run().info.run_id,
                    artifact_path=artifact_path)

                # Load model
                h2o_model_loaded = mlflow.h2o.load_model(model_uri=model_uri)
                assert all(
                    h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() ==
                    h2o_model.predict(h2o_iris_model.inference_data).as_data_frame())
            finally:
                mlflow.end_run()
                mlflow.set_tracking_uri(old_uri)
Example 7: test_model_log
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_model_log(onnx_model, onnx_custom_env):
    # pylint: disable=unused-argument
    import onnx
    import mlflow.onnx
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        try:
            if should_start_run:
                mlflow.start_run()
            artifact_path = "onnx_model"
            mlflow.onnx.log_model(onnx_model=onnx_model,
                                  artifact_path=artifact_path,
                                  conda_env=onnx_custom_env)
            model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id,
                artifact_path=artifact_path)

            # Load model
            onnx.checker.check_model = mock.Mock()
            mlflow.onnx.load_model(model_uri)
            assert onnx.checker.check_model.called
        finally:
            mlflow.end_run()
Example 8: test_model_log_load_no_active_run
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_model_log_load_no_active_run(sklearn_knn_model, iris_data, tmpdir):
    sk_model_path = os.path.join(str(tmpdir), "knn.pkl")
    with open(sk_model_path, "wb") as f:
        pickle.dump(sklearn_knn_model, f)

    pyfunc_artifact_path = "pyfunc_model"
    assert mlflow.active_run() is None
    mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                            data_path=sk_model_path,
                            loader_module=os.path.basename(__file__)[:-3],
                            code_path=[__file__])
    pyfunc_model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format(
        run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path))

    model_config = Model.load(os.path.join(pyfunc_model_path, "MLmodel"))
    assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors
    assert mlflow.pyfunc.PY_VERSION in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]
    reloaded_model = mlflow.pyfunc.load_pyfunc(pyfunc_model_path)
    np.testing.assert_array_equal(
        sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0]))
    mlflow.end_run()
Example 9: test_pyfunc_model_log_load_no_active_run
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_pyfunc_model_log_load_no_active_run(sklearn_knn_model, main_scoped_model_class, iris_data):
    sklearn_artifact_path = "sk_model_no_run"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_knn_model, artifact_path=sklearn_artifact_path)
        sklearn_model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id,
            artifact_path=sklearn_artifact_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2

    pyfunc_artifact_path = "pyfunc_model"
    assert mlflow.active_run() is None
    mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                            artifacts={"sk_model": sklearn_model_uri},
                            python_model=main_scoped_model_class(test_predict))
    pyfunc_model_uri = "runs:/{run_id}/{artifact_path}".format(
        run_id=mlflow.active_run().info.run_id,
        artifact_path=pyfunc_artifact_path)
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_uri)
    np.testing.assert_array_equal(
        loaded_pyfunc_model.predict(iris_data[0]),
        test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0]))
    mlflow.end_run()
Example 10: test_log_model_calls_register_model
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_log_model_calls_register_model(sklearn_knn_model, main_scoped_model_class):
    register_model_patch = mock.patch("mlflow.register_model")
    with register_model_patch:
        sklearn_artifact_path = "sk_model_no_run"
        with mlflow.start_run():
            mlflow.sklearn.log_model(sk_model=sklearn_knn_model,
                                     artifact_path=sklearn_artifact_path)
            sklearn_model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id,
                artifact_path=sklearn_artifact_path)

        def test_predict(sk_model, model_input):
            return sk_model.predict(model_input) * 2

        pyfunc_artifact_path = "pyfunc_model"
        assert mlflow.active_run() is None
        mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                                artifacts={"sk_model": sklearn_model_uri},
                                python_model=main_scoped_model_class(test_predict),
                                registered_model_name="AdsModel1")
        model_uri = "runs:/{run_id}/{artifact_path}".format(run_id=mlflow.active_run().info.run_id,
                                                            artifact_path=pyfunc_artifact_path)
        mlflow.register_model.assert_called_once_with(model_uri, "AdsModel1")
    mlflow.end_run()
Example 11: test_log_model_no_registered_model_name
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def test_log_model_no_registered_model_name(sklearn_knn_model, main_scoped_model_class):
    register_model_patch = mock.patch("mlflow.register_model")
    with register_model_patch:
        sklearn_artifact_path = "sk_model_no_run"
        with mlflow.start_run():
            mlflow.sklearn.log_model(sk_model=sklearn_knn_model,
                                     artifact_path=sklearn_artifact_path)
            sklearn_model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id,
                artifact_path=sklearn_artifact_path)

        def test_predict(sk_model, model_input):
            return sk_model.predict(model_input) * 2

        pyfunc_artifact_path = "pyfunc_model"
        assert mlflow.active_run() is None
        mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                                artifacts={"sk_model": sklearn_model_uri},
                                python_model=main_scoped_model_class(test_predict))
        mlflow.register_model.assert_not_called()
    mlflow.end_run()
Example 12: end_run
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def end_run(cls):
    logger.info(f"**** End of Experiment **** ")
Example 13: end_run
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def end_run():
    pass
Example 14: configure
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def configure(
    self,
    run_uuid,
    experiment_name,
    tracking_uri,
    run_name=None,
    always_log_artifacts=False,
    create_run=True,
    create_experiment=True,
    nest_run=True,
):
    if mlflow.active_run() and not nest_run:
        logger.info('Ending previous MLFlow run: {}.'.format(self.run_uuid))
        mlflow.end_run()

    self.always_log_artifacts = always_log_artifacts
    self._experiment_name = experiment_name
    self._run_name = run_name

    # MLflow specific
    if tracking_uri:
        mlflow.set_tracking_uri(tracking_uri)

    if run_uuid:
        existing_run = MlflowClient().get_run(run_uuid)
        if not existing_run and not create_run:
            raise FileNotFoundError(
                'Run ID {} not found under {}'.format(
                    run_uuid, mlflow.get_tracking_uri()
                )
            )

    experiment_id = self._retrieve_mlflow_experiment_id(
        experiment_name, create=create_experiment
    )

    return mlflow.start_run(
        run_uuid,
        experiment_id=experiment_id,
        run_name=run_name,
        nested=nest_run,
    )
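A side note on Example 14 (not part of the original source): MLflow allows only one active run per thread unless runs are explicitly nested, which is why configure() first calls mlflow.end_run() when nest_run is False. A minimal sketch of that nesting behavior:

import mlflow

outer = mlflow.start_run(run_name="outer")
inner = mlflow.start_run(run_name="inner", nested=True)  # child run of "outer"
mlflow.end_run()                    # ends the innermost active run ("inner")
mlflow.end_run()                    # ends "outer"
assert mlflow.active_run() is None  # no run is active anymore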
Example 15: close
# Required import: import mlflow [as alias]
# Or: from mlflow import end_run [as alias]
def close(self, mlflow=False):
    for prefix, writer in self.writers.items():
        writer.close()
    if mlflow:
        module_mlflow.end_run()