

Python core.Workspace Code Examples

This article collects typical usage examples of the azureml.core.Workspace class in Python. If you are wondering what core.Workspace does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples from the azureml.core module.


Eight code examples of core.Workspace are shown below, sorted by popularity by default.

Example 1: get_workspace_kwargs

# Required import: from azureml import core [as alias]
# Or: from azureml.core import Workspace [as alias]
def get_workspace_kwargs() -> dict:
    """Get AzureML keyword arguments from environment

    The name of this environment variable is set in the Argo workflow template,
    and its value should be in the format:
    `<subscription_id>:<resource_group>:<workspace_name>`.

    Returns
    -------
    workspace_kwargs: dict
        AzureML Workspace configuration to use for remote MLFlow tracking. See
        :func:`gordo.builder.mlflow_utils.get_mlflow_client`.
    """
    return get_kwargs_from_secret(
        "AZUREML_WORKSPACE_STR", ["subscription_id", "resource_group", "workspace_name"]
    ) 
Developer ID: equinor, Project: gordo, Lines: 18, Source file: mlflow.py
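
A minimal usage sketch of example 1 (the subscription, resource group, and workspace names are placeholders; it assumes, as the docstring describes, that get_kwargs_from_secret splits the colon-separated value of the named environment variable into the listed keys):

import os

os.environ["AZUREML_WORKSPACE_STR"] = "my-subscription-id:my-resource-group:my-workspace"
workspace_kwargs = get_workspace_kwargs()
# -> {"subscription_id": "my-subscription-id",
#     "resource_group": "my-resource-group",
#     "workspace_name": "my-workspace"}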

Example 2: mlflow_context

# Required import: from azureml import core [as alias]
# Or: from azureml.core import Workspace [as alias]
@contextmanager  # from contextlib import contextmanager
def mlflow_context(
    name: str,
    model_key: str = uuid4().hex,
    workspace_kwargs: dict = {},
    service_principal_kwargs: dict = {},
):
    """
    Generate MLflow logger function with either a local or AzureML backend

    Parameters
    ----------
    name: str
        The name of the log group to log to (e.g. a model name).
    model_key: str
        Unique ID of logging run.
    workspace_kwargs: dict
        AzureML Workspace configuration to use for remote MLFlow tracking. See
        :func:`gordo.builder.mlflow_utils.get_mlflow_client`.
    service_principal_kwargs: dict
        AzureML ServicePrincipalAuthentication keyword arguments. See
        :func:`gordo.builder.mlflow_utils.get_mlflow_client`

    Example
    -------
    >>> with tempfile.TemporaryDirectory() as tmp_dir:
    ...     mlflow.set_tracking_uri(f"file:{tmp_dir}")
    ...     with mlflow_context("log_group", "unique_key", {}, {}) as (mlflow_client, run_id):
    ...         log_machine(machine) # doctest: +SKIP
    """
    mlflow_client = get_mlflow_client(workspace_kwargs, service_principal_kwargs)
    run_id = get_run_id(mlflow_client, experiment_name=name, model_key=model_key)

    logger.info(
        f"MLflow client configured to use {'AzureML' if workspace_kwargs else 'local backend'}"
    )

    yield mlflow_client, run_id

    mlflow_client.set_terminated(run_id) 
Developer ID: equinor, Project: gordo, Lines: 41, Source file: mlflow.py
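
A hedged sketch combining get_workspace_kwargs from example 1 with this context manager to log against an AzureML-backed MLflow server; the model name and logged values are placeholders, and the import path follows the gordo.builder.mlflow_utils references in the docstrings above:

from gordo.builder.mlflow_utils import mlflow_context, get_workspace_kwargs

with mlflow_context("my-model", workspace_kwargs=get_workspace_kwargs()) as (client, run_id):
    # client is an mlflow.tracking.MlflowClient; log against the run it created
    client.log_param(run_id, "n_estimators", 100)
    client.log_metric(run_id, "mae", 3.2)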

Example 3: get_compute

# Required import: from azureml import core [as alias]
# Or: from azureml.core import Workspace [as alias]
def get_compute(workspace: Workspace, compute_name: str, vm_size: str, for_batch_scoring: bool = False):  # NOQA E501
    try:
        if compute_name in workspace.compute_targets:
            compute_target = workspace.compute_targets[compute_name]
            if compute_target and type(compute_target) is AmlCompute:
                print("Found existing compute target " + compute_name + " so using it.") # NOQA
        else:
            e = Env()
            compute_config = AmlCompute.provisioning_configuration(
                vm_size=vm_size,
                vm_priority=e.vm_priority if not for_batch_scoring else e.vm_priority_scoring,  # NOQA E501
                min_nodes=e.min_nodes if not for_batch_scoring else e.min_nodes_scoring,  # NOQA E501
                max_nodes=e.max_nodes if not for_batch_scoring else e.max_nodes_scoring,  # NOQA E501
                idle_seconds_before_scaledown="300",
                # Uncomment the lines below for VNet support:
                # vnet_resourcegroup_name=vnet_resourcegroup_name,
                # vnet_name=vnet_name,
                # subnet_name=subnet_name
            )
            compute_target = ComputeTarget.create(
                workspace, compute_name, compute_config
            )
            compute_target.wait_for_completion(
                show_output=True, min_node_count=None, timeout_in_minutes=10
            )
        return compute_target
    except ComputeTargetException as ex:
        print(ex)
        print("An error occurred trying to provision compute.")
        exit(1) 
Developer ID: microsoft, Project: MLOpsPython, Lines: 32, Source file: attach_compute.py
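
A minimal call sketch, assuming a workspace config.json is available locally and that the Env() helper reads node counts and VM priority from environment variables; the compute name and VM size are placeholders:

from azureml.core import Workspace

ws = Workspace.from_config()  # reads config.json / .azureml/config.json
cpu_cluster = get_compute(ws, compute_name="cpu-cluster", vm_size="STANDARD_DS2_V2")
print(cpu_cluster.name)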

Example 4: get_environment

# Required import: from azureml import core [as alias]
# Or: from azureml.core import Workspace [as alias]
def get_environment(
    workspace: Workspace,
    environment_name: str,
    conda_dependencies_file: str,
    create_new: bool = False,
    enable_docker: bool = None,
    use_gpu: bool = False
):
    try:
        e = Env()
        environments = Environment.list(workspace=workspace)
        restored_environment = None
        for env in environments:
            if env == environment_name:
                restored_environment = environments[environment_name]

        if restored_environment is None or create_new:
            new_env = Environment.from_conda_specification(
                environment_name,
                os.path.join(e.sources_directory_train, conda_dependencies_file),  # NOQA: E501
            )  # NOQA: E501
            restored_environment = new_env
            if enable_docker is not None:
                restored_environment.docker.enabled = enable_docker
                restored_environment.docker.base_image = DEFAULT_GPU_IMAGE if use_gpu else DEFAULT_CPU_IMAGE  # NOQA: E501
            restored_environment.register(workspace)

        if restored_environment is not None:
            print(restored_environment)
        return restored_environment
    except Exception as e:
        print(e)
        exit(1) 
Developer ID: microsoft, Project: MLOpsPython, Lines: 35, Source file: manage_environment.py
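
A hypothetical invocation; the environment name and conda file name are placeholders, and conda_dependencies_file is resolved relative to e.sources_directory_train as in the function above:

from azureml.core import Workspace

ws = Workspace.from_config()
train_env = get_environment(
    ws,
    environment_name="training-env",
    conda_dependencies_file="conda_dependencies.yml",
    enable_docker=True,
    use_gpu=False,
)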

Example 5: register_dataset

# Required import: from azureml import core [as alias]
# Or: from azureml.core import Workspace [as alias]
def register_dataset(
    aml_workspace: Workspace,
    dataset_name: str,
    datastore_name: str,
    file_path: str
) -> Dataset:
    datastore = Datastore.get(aml_workspace, datastore_name)
    dataset = Dataset.Tabular.from_delimited_files(path=(datastore, file_path))
    dataset = dataset.register(workspace=aml_workspace,
                               name=dataset_name,
                               create_new_version=True)

    return dataset 
Developer ID: microsoft, Project: MLOpsPython, Lines: 15, Source file: train_aml.py
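
A usage sketch with placeholder names; "workspaceblobstore" is the default datastore created with an AzureML workspace, but any registered datastore name works:

from azureml.core import Workspace

ws = Workspace.from_config()
dataset = register_dataset(
    ws,
    dataset_name="diabetes_ds",
    datastore_name="workspaceblobstore",
    file_path="training/diabetes.csv",
)
print(dataset.name, dataset.version)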

Example 6: get_current_workspace

# Required import: from azureml import core [as alias]
# Or: from azureml.core import Workspace [as alias]
def get_current_workspace() -> Workspace:
    """
    Retrieves and returns the current workspace.
    Will not work when run locally.

    Parameters:
    None

    Return:
    The current workspace.
    """
    run = Run.get_context(allow_offline=False)
    experiment = run.experiment
    return experiment.workspace 
Developer ID: microsoft, Project: MLOpsPython, Lines: 16, Source file: model_helper.py
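
This only works inside a submitted AzureML run, because Run.get_context(allow_offline=False) raises when no run context exists; a sketch of how a training script might use it:

ws = get_current_workspace()
print(ws.name, ws.resource_group, ws.location)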

Example 7: get_compute

# Required import: from azureml import core [as alias]
# Or: from azureml.core import Workspace [as alias]
def get_compute(
    workspace: Workspace,
    dbcomputename: str,
    resource_group: str,
    dbworkspace: str,
    dbaccesstoken: str
):
    try:
        databricks_compute = DatabricksCompute(
            workspace=workspace,
            name=dbcomputename)
        print('Compute target {} already exists'.format(dbcomputename))
    except ComputeTargetException:
        print('Compute not found, will use below parameters to attach new one')
        print('db_compute_name {}'.format(dbcomputename))
        print('db_resource_group {}'.format(resource_group))
        print('db_workspace_name {}'.format(dbworkspace))

        config = DatabricksCompute.attach_configuration(
            resource_group=resource_group,
            workspace_name=dbworkspace,
            access_token=dbaccesstoken)

        databricks_compute = ComputeTarget.attach(
            workspace,
            dbcomputename,
            config)
        databricks_compute.wait_for_completion(True)
    return databricks_compute 
Developer ID: Azure-Samples, Project: MLOpsDatabricks, Lines: 31, Source file: attach_compute.py
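
A hypothetical call; the Databricks names are placeholders, and the access token would normally come from a pipeline secret or environment variable rather than source code:

import os

from azureml.core import Workspace

ws = Workspace.from_config()
db_compute = get_compute(
    ws,
    dbcomputename="databricks-compute",
    resource_group="my-databricks-rg",
    dbworkspace="my-databricks-ws",
    dbaccesstoken=os.environ["DATABRICKS_ACCESS_TOKEN"],
)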

Example 8: get_model

# Required import: from azureml import core [as alias]
# Or: from azureml.core import Workspace [as alias]
def get_model(
    model_name: str,
    model_version: int = None,  # If none, return latest model
    tag_name: str = None,
    tag_value: str = None,
    aml_workspace: Workspace = None
) -> AMLModel:
    """
    Retrieves and returns a model from the workspace by its name
    and (optional) tag.

    Parameters:
    aml_workspace (Workspace): aml.core Workspace that the model lives.
    model_name (str): name of the model we are looking for
    (optional) model_version (str): model version. Latest if not provided.
    (optional) tag (str): the tag value & name the model was registered under.

    Return:
    A single aml model from the workspace that matches the name and tag, or
    None.
    """
    if aml_workspace is None:
        print("No workspace defined - using current experiment workspace.")
        aml_workspace = get_current_workspace()

    tags = None
    if tag_name is not None or tag_value is not None:
        # Both a name and value must be specified to use tags.
        if tag_name is None or tag_value is None:
            raise ValueError(
                "model_tag_name and model_tag_value should both be supplied"
                + "or excluded"  # NOQA: E501
            )
        tags = [[tag_name, tag_value]]

    model = None
    if model_version is not None:
        # TODO(tcare): Finding a specific version currently expects exceptions
        # to propagate in the case we can't find the model. This call may
        # result in a WebserviceException that may or may not be due to the
        # model not existing.
        model = AMLModel(
            aml_workspace,
            name=model_name,
            version=model_version,
            tags=tags)
    else:
        models = AMLModel.list(
            aml_workspace, name=model_name, tags=tags, latest=True)
        if len(models) == 1:
            model = models[0]
        elif len(models) > 1:
            raise Exception("Expected only one model")

    return model 
Developer ID: microsoft, Project: MLOpsPython, Lines: 57, Source file: model_helper.py
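
Sketches of the three lookup modes; the model name, version, and tag values are placeholders:

latest = get_model("sklearn_regression_model.pkl")
pinned = get_model("sklearn_regression_model.pkl", model_version=3)
tagged = get_model("sklearn_regression_model.pkl",
                   tag_name="stage", tag_value="production")
if latest is not None:
    print(latest.name, latest.version)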


Note: The azureml.core.Workspace examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; please consult each project's license before redistributing or reusing the code, and do not repost this article without permission.