This article collects typical usage examples of the Python method mlflow.active_run. If you are wondering what exactly mlflow.active_run does, how to call it, or what real-world usages look like, the hand-picked code examples here may help. You can also explore further usage examples from the mlflow module in which this method lives.
The following shows 15 code examples of mlflow.active_run, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
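Before the collected examples, here is a minimal sketch of the basic contract of mlflow.active_run(): it returns the currently active Run object, or None when no run is open. The run name and metric key below are illustrative only and are not taken from any of the examples that follow.

import mlflow

# No run has been started yet, so active_run() returns None.
assert mlflow.active_run() is None

with mlflow.start_run(run_name="demo"):  # illustrative run name
    run = mlflow.active_run()            # the Run object for the run opened above
    mlflow.log_metric("accuracy", 0.9)   # illustrative metric
    print(run.info.run_id)               # the ID of the active run

# The context manager has ended the run, so there is no active run again.
assert mlflow.active_run() is None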
Example 1: log_project_artifacts_to_mlflow
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def log_project_artifacts_to_mlflow(function: Callable):
    """
    Log the project artifacts to mlflow.

    Parameters
    ----------
    function
        function to wrap
    """
    @wraps(function)
    def wrapped(*args, **kwargs):
        if mlflow.active_run() is None:
            _warn_about_no_run()
            return function(*args, **kwargs)
        artifacts_path = project.get_active_artifacts_directory()
        artifacts_path_realpath = os.path.realpath(artifacts_path)
        mlflow.log_artifacts(artifacts_path_realpath)
        return function(*args, **kwargs)
    return wrapped

# pylint: disable=invalid-name
# this is a method, not a constant, and is used inside of the patch
Example 2: start
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def start(self):
    """
    Start a new experiment.
    """
    if self.with_mlflow:
        import mlflow

        if mlflow.active_run() is not None:
            active_run = mlflow.active_run()
            self.inherit_existing_run = True
        else:
            active_run = mlflow.start_run(run_name=self.mlflow_run_name, run_id=self.mlflow_run_id)
        mlflow_metadata = {
            'artifact_uri': active_run.info.artifact_uri,
            'experiment_id': active_run.info.experiment_id,
            'run_id': active_run.info.run_id
        }
        self.mlflow_run_id = active_run.info.run_id
        with open(os.path.join(self.logging_directory, 'mlflow.json'), 'w') as f:
            json.dump(mlflow_metadata, f, indent=4)
Example 3: test_inherit_outer_scope_run
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_inherit_outer_scope_run(tmpdir_name):
    mlflow.start_run()
    mlflow.log_param('foo', 1)
    params = {
        'objective': 'binary',
        'max_depth': 8
    }
    X, y = make_classification_df()
    run_experiment(params, X, y, with_mlflow=True, logging_directory=tmpdir_name)
    assert mlflow.active_run() is not None  # still valid
    client = mlflow.tracking.MlflowClient()
    data = client.get_run(mlflow.active_run().info.run_id).data
    assert data.metrics['Overall'] > 0  # recorded
    mlflow.end_run()
Example 4: test_ignore_errors_in_mlflow_params
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_ignore_errors_in_mlflow_params(tmpdir_name):
    mlflow.start_run()
    mlflow.log_param('features', 'ABC')
    mlflow.log_metric('Overall', -99)
    params = {
        'objective': 'binary',
        'max_depth': 8
    }
    X, y = make_classification_df()
    result = run_experiment(params, X, y, with_mlflow=True, logging_directory=tmpdir_name, feature_list=[])
    client = mlflow.tracking.MlflowClient()
    data = client.get_run(mlflow.active_run().info.run_id).data
    assert data.metrics['Overall'] == result.metrics[-1]
    assert data.params['features'] == 'ABC'  # params cannot be overwritten
    mlflow.end_run()
Example 5: _log_event
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def _log_event(event):
    """
    Extracts metric information from the event protobuf
    """
    if not mlflow.active_run():
        try_mlflow_log(mlflow.start_run)
        global _AUTOLOG_RUN_ID
        _AUTOLOG_RUN_ID = mlflow.active_run().info.run_id
    if event.WhichOneof('what') == 'summary':
        summary = event.summary
        for v in summary.value:
            if v.HasField('simple_value'):
                if (event.step - 1) % _LOG_EVERY_N_STEPS == 0:
                    _thread_pool.submit(_add_to_queue, key=v.tag,
                                        value=v.simple_value, step=event.step,
                                        time=int(time.time() * 1000),
                                        run_id=mlflow.active_run().info.run_id)
Example 6: test_model_log
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_model_log(model, data, predicted):
    x, _ = data
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        try:
            if should_start_run:
                mlflow.start_run()
            artifact_path = "keras_model"
            mlflow.keras.log_model(model, artifact_path=artifact_path)
            model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id,
                artifact_path=artifact_path)
            # Load model
            model_loaded = mlflow.keras.load_model(model_uri=model_uri)
            assert all(model_loaded.predict(x) == predicted)
            # Loading pyfunc model
            pyfunc_loaded = mlflow.pyfunc.load_model(model_uri=model_uri)
            assert all(pyfunc_loaded.predict(x).values == predicted)
        finally:
            mlflow.end_run()
Example 7: test_cli_build_image_with_runs_uri_calls_expected_azure_routines
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_cli_build_image_with_runs_uri_calls_expected_azure_routines(sklearn_model):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_model, artifact_path=artifact_path)
        run_id = mlflow.active_run().info.run_id
    model_uri = "runs:/{run_id}/{artifact_path}".format(
        run_id=run_id, artifact_path=artifact_path)
    with AzureMLMocks() as aml_mocks:
        result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
            mlflow.azureml.cli.commands,
            [
                'build-image',
                '-m', model_uri,
                '-w', 'test_workspace',
                '-i', 'image_name',
                '-n', 'model_name',
            ])
        assert result.exit_code == 0
        assert aml_mocks["register_model"].call_count == 1
        assert aml_mocks["create_image"].call_count == 1
        assert aml_mocks["load_workspace"].call_count == 1
Example 8: test_autolog_ends_auto_created_run
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_autolog_ends_auto_created_run():
    mlflow.gluon.autolog()
    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
    model = HybridSequential()
    model.add(Dense(64, activation="relu"))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10))
    model.initialize()
    model.hybridize()
    trainer = Trainer(model.collect_params(), "adam",
                      optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
    est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
                              metrics=Accuracy(), trainer=trainer)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        est.fit(data, epochs=3)
    assert mlflow.active_run() is None
Example 9: test_sparkml_model_log_persists_specified_conda_env_in_mlflow_model_directory
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_sparkml_model_log_persists_specified_conda_env_in_mlflow_model_directory(
        spark_model_iris, model_path, spark_custom_env):
    artifact_path = "model"
    with mlflow.start_run():
        sparkm.log_model(
            spark_model=spark_model_iris.model,
            artifact_path=artifact_path,
            conda_env=spark_custom_env)
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id,
            artifact_path=artifact_path)
    model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    assert saved_conda_env_path != spark_custom_env
    with open(spark_custom_env, "r") as f:
        spark_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == spark_custom_env_parsed
Example 10: test_sparkml_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_sparkml_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        spark_model_iris):
    artifact_path = "model"
    with mlflow.start_run():
        sparkm.log_model(
            spark_model=spark_model_iris.model, artifact_path=artifact_path, conda_env=None)
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id,
            artifact_path=artifact_path)
    model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)
    assert conda_env == sparkm.get_default_conda_env()
Example 11: test_mleap_model_log
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_mleap_model_log(spark_model_iris):
    artifact_path = "model"
    register_model_patch = mock.patch("mlflow.register_model")
    with mlflow.start_run(), register_model_patch:
        sparkm.log_model(spark_model=spark_model_iris.model,
                         sample_input=spark_model_iris.spark_df,
                         artifact_path=artifact_path,
                         registered_model_name="Model1")
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id,
            artifact_path=artifact_path)
        mlflow.register_model.assert_called_once_with(model_uri, "Model1")
    model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    config_path = os.path.join(model_path, "MLmodel")
    mlflow_model = Model.load(config_path)
    assert sparkm.FLAVOR_NAME in mlflow_model.flavors
    assert mleap.FLAVOR_NAME in mlflow_model.flavors
Example 12: test_log_metrics_uses_millisecond_timestamp_resolution_client
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_log_metrics_uses_millisecond_timestamp_resolution_client():
    with start_run() as active_run, mock.patch("time.time") as time_mock:
        time_mock.side_effect = lambda: 123
        mlflow_client = tracking.MlflowClient()
        run_id = active_run.info.run_id
        mlflow_client.log_metric(run_id=run_id, key="name_1", value=25)
        mlflow_client.log_metric(run_id=run_id, key="name_2", value=-3)
        mlflow_client.log_metric(run_id=run_id, key="name_1", value=30)
        mlflow_client.log_metric(run_id=run_id, key="name_1", value=40)
    metric_history_name1 = mlflow_client.get_metric_history(run_id, "name_1")
    assert set([(m.value, m.timestamp) for m in metric_history_name1]) == set([
        (25, 123 * 1000),
        (30, 123 * 1000),
        (40, 123 * 1000),
    ])
    metric_history_name2 = mlflow_client.get_metric_history(run_id, "name_2")
    assert set([(m.value, m.timestamp) for m in metric_history_name2]) == set([
        (-3, 123 * 1000),
    ])
Example 13: test_log_batch_validates_entity_names_and_values
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_log_batch_validates_entity_names_and_values():
    bad_kwargs = {
        "metrics": [
            [Metric(key="../bad/metric/name", value=0.3, timestamp=3, step=0)],
            [Metric(key="ok-name", value="non-numerical-value", timestamp=3, step=0)],
            [Metric(key="ok-name", value=0.3, timestamp="non-numerical-timestamp", step=0)],
        ],
        "params": [[Param(key="../bad/param/name", value="my-val")]],
        "tags": [[Param(key="../bad/tag/name", value="my-val")]],
    }
    with start_run() as active_run:
        for kwarg, bad_values in bad_kwargs.items():
            for bad_kwarg_value in bad_values:
                final_kwargs = {
                    "run_id": active_run.info.run_id, "metrics": [], "params": [], "tags": [],
                }
                final_kwargs[kwarg] = bad_kwarg_value
                with pytest.raises(MlflowException) as e:
                    tracking.MlflowClient().log_batch(**final_kwargs)
                assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
Example 14: test_parent_create_run
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_parent_create_run():
    with mlflow.start_run() as parent_run:
        parent_run_id = parent_run.info.run_id
    os.environ[_RUN_ID_ENV_VAR] = parent_run_id
    with mlflow.start_run() as parent_run:
        assert parent_run.info.run_id == parent_run_id
        with pytest.raises(Exception, match='To start a nested run'):
            mlflow.start_run()
        with mlflow.start_run(nested=True) as child_run:
            assert child_run.info.run_id != parent_run_id
            with mlflow.start_run(nested=True) as grand_child_run:
                pass

        def verify_has_parent_id_tag(child_id, expected_parent_id):
            tags = tracking.MlflowClient().get_run(child_id).data.tags
            assert tags[MLFLOW_PARENT_RUN_ID] == expected_parent_id

        verify_has_parent_id_tag(child_run.info.run_id, parent_run.info.run_id)
        verify_has_parent_id_tag(grand_child_run.info.run_id, child_run.info.run_id)
    assert mlflow.active_run() is None
Example 15: test_delete_tag
# Required import: import mlflow [as alias]
# Or: from mlflow import active_run [as alias]
def test_delete_tag():
    """
    Confirm that deleting tags via the fluent API actually works.
    """
    mlflow.set_tag('a', 'b')
    run = MlflowClient().get_run(mlflow.active_run().info.run_id)
    print(run.info.run_id)
    assert 'a' in run.data.tags
    mlflow.delete_tag('a')
    run = MlflowClient().get_run(mlflow.active_run().info.run_id)
    assert 'a' not in run.data.tags
    with pytest.raises(MlflowException):
        mlflow.delete_tag('a')
    with pytest.raises(MlflowException):
        mlflow.delete_tag('b')
    mlflow.end_run()