This page collects typical usage examples of the Python method mlflow.start_run. If you have been wondering what exactly mlflow.start_run does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the mlflow module that the method belongs to.
The following shows 15 code examples of mlflow.start_run, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
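As a quick orientation before the examples: mlflow.start_run starts (or resumes) an MLflow run and is most commonly used as a context manager so the run is ended automatically. The sketch below is illustrative only and is not taken from the examples on this page; the experiment name and the logged keys and values are arbitrary placeholders.

import mlflow

mlflow.set_experiment("demo-experiment")        # hypothetical experiment name
with mlflow.start_run(run_name="baseline") as run:   # run_name is optional
    mlflow.log_param("max_depth", 8)            # record a hyperparameter
    mlflow.log_metric("accuracy", 0.93)         # record a result (placeholder value)
    print(run.info.run_id)                      # run id assigned by the tracking backend
# the run is ended automatically when the with-block exits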
Example 1: start
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def start(self):
    """
    Start a new experiment.
    """
    if self.with_mlflow:
        import mlflow

        if mlflow.active_run() is not None:
            active_run = mlflow.active_run()
            self.inherit_existing_run = True
        else:
            active_run = mlflow.start_run(run_name=self.mlflow_run_name, run_id=self.mlflow_run_id)
        mlflow_metadata = {
            'artifact_uri': active_run.info.artifact_uri,
            'experiment_id': active_run.info.experiment_id,
            'run_id': active_run.info.run_id
        }
        self.mlflow_run_id = active_run.info.run_id
        with open(os.path.join(self.logging_directory, 'mlflow.json'), 'w') as f:
            json.dump(mlflow_metadata, f, indent=4)
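Two details of Example 1 are worth noting: mlflow.start_run(run_id=...) attaches to an existing run instead of creating a new one, and calling start_run while another run is already active raises an error, which is why the code checks mlflow.active_run() first. Below is a minimal sketch of the same resume-or-create pattern outside the class; stored_run_id is a hypothetical placeholder.

import mlflow

stored_run_id = None  # e.g. a run id loaded from a previous session's mlflow.json
if mlflow.active_run() is not None:
    run = mlflow.active_run()                     # reuse the run opened by the caller
else:
    run = mlflow.start_run(run_id=stored_run_id)  # resume if an id is given, else create a new run
print(run.info.run_id)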
Example 2: test_inherit_outer_scope_run
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_inherit_outer_scope_run(tmpdir_name):
    mlflow.start_run()
    mlflow.log_param('foo', 1)
    params = {
        'objective': 'binary',
        'max_depth': 8
    }
    X, y = make_classification_df()
    run_experiment(params, X, y, with_mlflow=True, logging_directory=tmpdir_name)
    assert mlflow.active_run() is not None  # still valid
    client = mlflow.tracking.MlflowClient()
    data = client.get_run(mlflow.active_run().info.run_id).data
    assert data.metrics['Overall'] > 0  # recorded
    mlflow.end_run()
Example 3: test_ignore_errors_in_mlflow_params
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_ignore_errors_in_mlflow_params(tmpdir_name):
    mlflow.start_run()
    mlflow.log_param('features', 'ABC')
    mlflow.log_metric('Overall', -99)
    params = {
        'objective': 'binary',
        'max_depth': 8
    }
    X, y = make_classification_df()
    result = run_experiment(params, X, y, with_mlflow=True, logging_directory=tmpdir_name, feature_list=[])
    client = mlflow.tracking.MlflowClient()
    data = client.get_run(mlflow.active_run().info.run_id).data
    assert data.metrics['Overall'] == result.metrics[-1]
    assert data.params['features'] == 'ABC'  # params cannot be overwritten
    mlflow.end_run()
Example 4: _log_event
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def _log_event(event):
    """
    Extracts metric information from the event protobuf
    """
    if not mlflow.active_run():
        try_mlflow_log(mlflow.start_run)
        global _AUTOLOG_RUN_ID
        _AUTOLOG_RUN_ID = mlflow.active_run().info.run_id
    if event.WhichOneof('what') == 'summary':
        summary = event.summary
        for v in summary.value:
            if v.HasField('simple_value'):
                if (event.step - 1) % _LOG_EVERY_N_STEPS == 0:
                    _thread_pool.submit(_add_to_queue, key=v.tag,
                                        value=v.simple_value, step=event.step,
                                        time=int(time.time() * 1000),
                                        run_id=mlflow.active_run().info.run_id)
Example 5: etl_data
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def etl_data(ratings_csv, max_row_limit):
    with mlflow.start_run() as mlrun:
        tmpdir = tempfile.mkdtemp()
        ratings_parquet_dir = os.path.join(tmpdir, 'ratings-parquet')
        spark = pyspark.sql.SparkSession.builder.getOrCreate()
        print("Converting ratings CSV %s to Parquet %s" % (ratings_csv, ratings_parquet_dir))
        ratings_df = spark.read \
            .option("header", "true") \
            .option("inferSchema", "true") \
            .csv(ratings_csv) \
            .drop("timestamp")  # Drop unused column
        ratings_df.show()
        if max_row_limit != -1:
            ratings_df = ratings_df.limit(max_row_limit)
        ratings_df.write.parquet(ratings_parquet_dir)
        print("Uploading Parquet ratings: %s" % ratings_parquet_dir)
        mlflow.log_artifacts(ratings_parquet_dir, "ratings-parquet-dir")
Example 6: load_raw_data
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def load_raw_data(url):
    with mlflow.start_run() as mlrun:
        local_dir = tempfile.mkdtemp()
        local_filename = os.path.join(local_dir, "ml-20m.zip")
        print("Downloading %s to %s" % (url, local_filename))
        r = requests.get(url, stream=True)
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
        extracted_dir = os.path.join(local_dir, 'ml-20m')
        print("Extracting %s into %s" % (local_filename, extracted_dir))
        with zipfile.ZipFile(local_filename, 'r') as zip_ref:
            zip_ref.extractall(local_dir)
        ratings_file = os.path.join(extracted_dir, 'ratings.csv')
        print("Uploading ratings: %s" % ratings_file)
        mlflow.log_artifact(ratings_file, "ratings-csv-dir")
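Examples 5 and 6 differ in one call: mlflow.log_artifacts uploads an entire local directory under the active run, while mlflow.log_artifact uploads a single file. A minimal sketch of both inside one run; the local paths are placeholders.

import mlflow

with mlflow.start_run():
    mlflow.log_artifact("ratings.csv", artifact_path="ratings-csv-dir")            # one file
    mlflow.log_artifacts("ratings-parquet/", artifact_path="ratings-parquet-dir")  # whole directory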
Example 7: test_model_log_persists_specified_conda_env_in_mlflow_model_directory
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(model, keras_custom_env):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.keras.log_model(
            keras_model=model, artifact_path=artifact_path, conda_env=keras_custom_env)
        model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path))
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    assert saved_conda_env_path != keras_custom_env
    with open(keras_custom_env, "r") as f:
        keras_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == keras_custom_env_parsed
Example 8: test_cli_build_image_with_runs_uri_calls_expected_azure_routines
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_cli_build_image_with_runs_uri_calls_expected_azure_routines(sklearn_model):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_model, artifact_path=artifact_path)
        run_id = mlflow.active_run().info.run_id
    model_uri = "runs:/{run_id}/{artifact_path}".format(
        run_id=run_id, artifact_path=artifact_path)
    with AzureMLMocks() as aml_mocks:
        result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
            mlflow.azureml.cli.commands,
            [
                'build-image',
                '-m', model_uri,
                '-w', 'test_workspace',
                '-i', 'image_name',
                '-n', 'model_name',
            ])
        assert result.exit_code == 0
        assert aml_mocks["register_model"].call_count == 1
        assert aml_mocks["create_image"].call_count == 1
        assert aml_mocks["load_workspace"].call_count == 1
Example 9: test_prepare_env_passes
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_prepare_env_passes(sk_model):
    if no_conda:
        pytest.skip("This test requires conda.")
    with TempDir(chdr=True):
        with mlflow.start_run() as active_run:
            mlflow.sklearn.log_model(sk_model, "model")
            model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id)
        # Test with no conda
        p = subprocess.Popen(["mlflow", "models", "prepare-env", "-m", model_uri,
                              "--no-conda"], stderr=subprocess.PIPE)
        assert p.wait() == 0
        # With conda
        p = subprocess.Popen(["mlflow", "models", "prepare-env", "-m", model_uri],
                             stderr=subprocess.PIPE)
        assert p.wait() == 0
        # Should be idempotent
        p = subprocess.Popen(["mlflow", "models", "prepare-env", "-m", model_uri],
                             stderr=subprocess.PIPE)
        assert p.wait() == 0
Example 10: test_prepare_env_fails
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_prepare_env_fails(sk_model):
    if no_conda:
        pytest.skip("This test requires conda.")
    with TempDir(chdr=True):
        with mlflow.start_run() as active_run:
            mlflow.sklearn.log_model(sk_model, "model",
                                     conda_env={"dependencies": ["mlflow-does-not-exist-dep==abc"]})
            model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id)
        # Test with no conda
        p = subprocess.Popen(["mlflow", "models", "prepare-env", "-m", model_uri,
                              "--no-conda"])
        assert p.wait() == 0
        # With conda - should fail due to bad conda environment.
        p = subprocess.Popen(["mlflow", "models", "prepare-env", "-m", model_uri])
        assert p.wait() != 0
Example 11: test_model_log
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_model_log():
    with TempDir(chdr=True) as tmp:
        experiment_id = mlflow.create_experiment("test")
        sig = ModelSignature(inputs=Schema([ColSpec("integer", "x"), ColSpec("integer", "y")]),
                             outputs=Schema([ColSpec(name=None, type="double")]))
        input_example = {"x": 1, "y": 2}
        with mlflow.start_run(experiment_id=experiment_id) as r:
            Model.log("some/path", TestFlavor,
                      signature=sig,
                      input_example=input_example)
        local_path = _download_artifact_from_uri("runs:/{}/some/path".format(r.info.run_id),
                                                 output_path=tmp.path(""))
        loaded_model = Model.load(os.path.join(local_path, "MLmodel"))
        assert loaded_model.run_id == r.info.run_id
        assert loaded_model.artifact_path == "some/path"
        assert loaded_model.flavors == {
            "flavor1": {"a": 1, "b": 2},
            "flavor2": {"x": 1, "y": 2},
        }
        assert loaded_model.signature == sig
        path = os.path.join(local_path, loaded_model.saved_input_example_info["artifact_path"])
        x = _dataframe_from_json(path)
        assert x.to_dict(orient="records")[0] == input_example
Example 12: test_autolog_persists_manually_created_run
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_autolog_persists_manually_created_run():
    mlflow.gluon.autolog()
    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
    with mlflow.start_run() as run:
        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(model.collect_params(), "adam",
                          optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
        est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
                                  metrics=Accuracy(), trainer=trainer)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3)
        assert mlflow.active_run().info.run_id == run.info.run_id
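Example 12 checks that autologging reuses a run that was opened manually. Related to this, mlflow.start_run(nested=True) can open a child run inside an already active run, which is useful for grouping, for example, cross-validation folds under one parent run. A minimal sketch; the run names and metric values are arbitrary placeholders.

import mlflow

with mlflow.start_run(run_name="parent"):
    for fold in range(3):
        # nested=True is required because a run is already active
        with mlflow.start_run(run_name="fold-%d" % fold, nested=True):
            mlflow.log_metric("fold_score", 0.9 + 0.01 * fold)  # placeholder values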
Example 13: test_sparkml_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_sparkml_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        spark_model_iris):
    artifact_path = "model"
    with mlflow.start_run():
        sparkm.log_model(
            spark_model=spark_model_iris.model, artifact_path=artifact_path, conda_env=None)
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id,
            artifact_path=artifact_path)
    model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)
    assert conda_env == sparkm.get_default_conda_env()
Example 14: test_mleap_model_log
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_mleap_model_log(spark_model_iris):
    artifact_path = "model"
    register_model_patch = mock.patch("mlflow.register_model")
    with mlflow.start_run(), register_model_patch:
        sparkm.log_model(spark_model=spark_model_iris.model,
                         sample_input=spark_model_iris.spark_df,
                         artifact_path=artifact_path,
                         registered_model_name="Model1")
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id,
            artifact_path=artifact_path)
        mlflow.register_model.assert_called_once_with(model_uri, "Model1")
    model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    config_path = os.path.join(model_path, "MLmodel")
    mlflow_model = Model.load(config_path)
    assert sparkm.FLAVOR_NAME in mlflow_model.flavors
    assert mleap.FLAVOR_NAME in mlflow_model.flavors
Example 15: test_download_artifact_from_absolute_uri_persists_data_to_specified_output_directory
# Required import: import mlflow [as alias]
# Or: from mlflow import start_run [as alias]
def test_download_artifact_from_absolute_uri_persists_data_to_specified_output_directory(tmpdir):
    artifact_file_name = "artifact.txt"
    artifact_text = "Sample artifact text"
    local_artifact_path = tmpdir.join(artifact_file_name).strpath
    with open(local_artifact_path, "w") as out:
        out.write(artifact_text)
    logged_artifact_subdir = "logged_artifact"
    with mlflow.start_run():
        mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_subdir)
        artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_subdir)
    artifact_output_path = tmpdir.join("artifact_output").strpath
    os.makedirs(artifact_output_path)
    _download_artifact_from_uri(artifact_uri=artifact_uri, output_path=artifact_output_path)
    assert logged_artifact_subdir in os.listdir(artifact_output_path)
    assert artifact_file_name in os.listdir(
        os.path.join(artifact_output_path, logged_artifact_subdir))
    with open(os.path.join(
            artifact_output_path, logged_artifact_subdir, artifact_file_name), "r") as f:
        assert f.read() == artifact_text