本文整理汇总了Python中azureml.core.compute.ComputeTarget.create方法的典型用法代码示例。如果您正苦于以下问题:Python ComputeTarget.create方法的具体用法?Python ComputeTarget.create怎么用?Python ComputeTarget.create使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类azureml.core.compute.ComputeTarget
的用法示例。
在下文中一共展示了ComputeTarget.create方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _create_cluster
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def _create_cluster(
    workspace,
    cluster_name=_CLUSTER_NAME,
    vm_size=_CLUSTER_VM_SIZE,
    min_nodes=_CLUSTER_MIN_NODES,
    max_nodes=_CLUSTER_MAX_NODES,
):
    """Fetch the AmlCompute cluster *cluster_name*, provisioning it if absent.

    Args:
        workspace: azureml.core.Workspace that owns the cluster.
        cluster_name (str, optional): Cluster name. Defaults to _CLUSTER_NAME.
        vm_size (str, optional): VM SKU for the nodes. Defaults to _CLUSTER_VM_SIZE.
        min_nodes (int, optional): Lower autoscale bound. Defaults to _CLUSTER_MIN_NODES.
        max_nodes (int, optional): Upper autoscale bound. Defaults to _CLUSTER_MAX_NODES.

    Returns:
        azureml.core.compute.ComputeTarget: The ready compute target.
    """
    log = logging.getLogger(__name__)
    try:
        # The lookup raises ComputeTargetException when the cluster does not exist.
        target = ComputeTarget(workspace=workspace, name=cluster_name)
        log.info("Found existing compute target.")
    except ComputeTargetException:
        log.info("Creating a new compute target...")
        config = AmlCompute.provisioning_configuration(
            vm_size=vm_size,
            min_nodes=min_nodes,
            max_nodes=max_nodes,
        )
        target = ComputeTarget.create(workspace, cluster_name, config)
        target.wait_for_completion(show_output=True)
    # Detailed status of the (possibly freshly created) AmlCompute cluster.
    log.debug(target.get_status().serialize())
    return target
示例2: _get_compute_target
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def _get_compute_target(self, ws, cluster_name):
    """Return an AmlCompute target named *cluster_name* in workspace *ws*.

    Reuses an existing cluster only when its VM size and min/max node counts
    match the current ``cluster/*`` config values; otherwise the stale cluster
    is deleted and a new one is provisioned under the same name.

    Args:
        ws: azureml.core.Workspace to look the cluster up in / create it in.
        cluster_name (str): Name of the compute target.

    Returns:
        azureml.core.compute.ComputeTarget: A provisioned AmlCompute target.
    """
    # Desired cluster shape, read from the project configuration.
    compute_min_nodes = int(self.ctx.config.get('cluster/min_nodes',1))
    compute_max_nodes = int(self.ctx.config.get('cluster/max_nodes',4))
    compute_sku = self.ctx.config.get('cluster/type','STANDARD_D2_V2')
    if cluster_name in ws.compute_targets:
        compute_target = ws.compute_targets[cluster_name]
        if compute_target and type(compute_target) is AmlCompute:
            ct_status = compute_target.get_status()
            if ct_status:
                ct_def = ct_status.serialize()
                # Reuse only when the live cluster matches the requested shape exactly.
                if ct_def.get('vmSize') == compute_sku and \
                    ct_def.get('scaleSettings', {}).get('minNodeCount') == compute_min_nodes and \
                    ct_def.get('scaleSettings', {}).get('maxNodeCount') == compute_max_nodes:
                    self.ctx.log(
                        'Found compute target %s ...' % cluster_name)
                    return compute_target
                else:
                    self.ctx.log('Delete existing AML compute context, since parameters has been modified.')
                    compute_target.delete()
                    # Deleting works very slowly; an alternative considered was to just switch names:
                    # cluster_name = self._fix_name(shortuuid.uuid())
                    # self.ctx.config.set('cluster/name', cluster_name)
                    # self.ctx.config.write()
                    try:
                        # Wait for the delete to finish before re-creating under the same name.
                        compute_target.wait_for_completion(show_output = True)
                    except Exception as e:
                        # Best effort: the target may already be gone when we poll it.
                        self.ctx.log_debug(str(e))
    self.ctx.log('Creating new AML compute context %s...'%cluster_name)
    provisioning_config = AmlCompute.provisioning_configuration(
        vm_size=compute_sku, min_nodes=compute_min_nodes,
        max_nodes=compute_max_nodes)
    compute_target = ComputeTarget.create(
        ws, cluster_name, provisioning_config)
    compute_target.wait_for_completion(show_output = True)
    return compute_target
示例3: _create_cluster
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def _create_cluster(workspace, cluster_name, vm_size, min_nodes, max_nodes):
    """Creates AzureML cluster

    Args:
        workspace (azureml.core.Workspace): Workspace to create the cluster in.
        cluster_name (string): The name you wish to assign the cluster.
        vm_size (string): The type of sku to use for your vm.
        min_nodes (int): Minimum number of nodes in cluster.
            Use 0 if you don't want to incur costs when it isn't being used.
        max_nodes (int): Maximum number of nodes in cluster.

    Returns:
        azureml.core.compute.ComputeTarget: The existing or newly created cluster.
    """
    logger = logging.getLogger(__name__)
    try:
        # The lookup raises ComputeTargetException when no such target exists.
        compute_target = ComputeTarget(workspace=workspace, name=cluster_name)
        logger.info("Found existing compute target.")
    except ComputeTargetException:
        logger.info("Creating a new compute target...")
        compute_config = AmlCompute.provisioning_configuration(
            vm_size=vm_size, min_nodes=min_nodes, max_nodes=max_nodes
        )
        # create the cluster
        compute_target = ComputeTarget.create(workspace, cluster_name, compute_config)
        compute_target.wait_for_completion(show_output=True)
    # use get_status() to get a detailed status for the current AmlCompute.
    logger.debug(compute_target.get_status().serialize())
    return compute_target
示例4: get_compute
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def get_compute(workspace: Workspace, compute_name: str, vm_size: str, for_batch_scoring: bool = False):  # NOQA E501
    """Get an existing AmlCompute target or provision a new one from Env settings.

    Args:
        workspace (Workspace): AzureML workspace to look in / create in.
        compute_name (str): Name of the compute target.
        vm_size (str): VM SKU used when a new cluster has to be created.
        for_batch_scoring (bool, optional): When True, use the *_scoring
            priority/node settings from Env instead of the training ones.

    Returns:
        azureml.core.compute.ComputeTarget: The compute target, or the process
        exits with code 1 if provisioning raises ComputeTargetException.
    """
    try:
        if compute_name in workspace.compute_targets:
            compute_target = workspace.compute_targets[compute_name]
            if compute_target and type(compute_target) is AmlCompute:
                print("Found existing compute target " + compute_name + " so using it.") # NOQA
        else:
            # Cluster sizing/priority comes from the environment configuration.
            e = Env()
            compute_config = AmlCompute.provisioning_configuration(
                vm_size=vm_size,
                vm_priority=e.vm_priority if not for_batch_scoring else e.vm_priority_scoring, # NOQA E501
                min_nodes=e.min_nodes if not for_batch_scoring else e.min_nodes_scoring, # NOQA E501
                max_nodes=e.max_nodes if not for_batch_scoring else e.max_nodes_scoring, # NOQA E501
                # NOTE(review): passed as the string "300"; the SDK parameter looks
                # like it should be an int of seconds — confirm against the API docs.
                idle_seconds_before_scaledown="300"
                # #Uncomment the below lines for VNet support
                # (NOTE: also add a trailing comma to the line above when doing so)
                # vnet_resourcegroup_name=vnet_resourcegroup_name,
                # vnet_name=vnet_name,
                # subnet_name=subnet_name
            )
            compute_target = ComputeTarget.create(
                workspace, compute_name, compute_config
            )
            compute_target.wait_for_completion(
                show_output=True, min_node_count=None, timeout_in_minutes=10
            )
        return compute_target
    except ComputeTargetException as ex:
        print(ex)
        print("An error occurred trying to provision compute.")
        exit(1)
示例5: submit_local
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def submit_local(
    self,
    project_folder,
    entry_script,
    script_params,
    dependencies_file=_DEPENDENCIES_FILE,
    wait_for_completion=True,
    docker_args=(),
):
    """Submit experiment for local execution

    Args:
        project_folder (string): Path of your source files for the experiment
        entry_script (string): The filename of your script to run. Must be found in your project_folder
        script_params (dict): Dictionary of script parameters
        dependencies_file (string, optional): The location of your environment.yml to use to create the
                                              environment your training script requires.
                                              Defaults to _DEPENDENCIES_FILE.
        wait_for_completion (bool, optional): Whether to block until experiment is done. Defaults to True.
        docker_args (tuple, optional): Docker arguments to pass. Defaults to ().
    """
    self._logger.info("Running in local mode")
    self._submit(
        dependencies_file,
        project_folder,
        entry_script,
        "local",  # compute target: run on the local machine instead of a cluster
        script_params,
        1,  # node_count — presumably fixed at 1 for local runs; confirm against _submit
        1,  # process_count_per_node — presumably 1 locally; confirm against _submit
        docker_args,
        wait_for_completion,
    )
示例6: submit
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def submit(
    self,
    project_folder,
    entry_script,
    script_params,
    node_count=1,
    workers_per_node=1,
    distributed=None,
    environment=None,
):
    """Submit experiment for remote execution on AzureML clusters.

    Args:
        project_folder (string): Path of your source files for the experiment
        entry_script (string): The filename of your script to run. Must be found in your project_folder
        script_params (dict): Dictionary of script parameters
        node_count (int, optional): Number of cluster nodes to run on. Defaults to 1.
        workers_per_node (int, optional): NOTE(review): accepted but never read in this
            body — confirm whether it should be forwarded to the estimator.
        distributed (optional): Distributed-backend selector, resolved via _get_distributed.
        environment (optional): Environment to run in; when None, one is created
            from the local environment.

    Returns:
        azureml.core.Run: AzureML Run object
    """
    self._logger.debug(script_params)
    # Resolve datastore references in the script parameters before submission.
    transformed_params = self._complete_datastore(script_params)
    self._logger.debug("Transformed script params")
    self._logger.debug(transformed_params)
    if environment is None:
        environment = create_environment_from_local()
    # Docker settings for GPU runs. NOTE(review): this also mutates a
    # caller-supplied environment — confirm that is intended.
    environment.docker.shm_size = "8g"
    environment.docker.base_image = _GPU_IMAGE
    estimator = _create_estimator(
        PyTorch,
        project_folder,
        entry_script,
        self.cluster,
        transformed_params,
        node_count,
        environment,
        _get_distributed(distributed),
    )
    self._logger.debug(estimator.conda_dependencies.__dict__)
    return self._experiment.submit(estimator)
示例7: submit
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def submit(
    self,
    project_folder,
    entry_script,
    script_params,
    dependencies_file=_DEPENDENCIES_FILE,
    node_count=_CLUSTER_MAX_NODES,
    process_count_per_node=4,
    wait_for_completion=True,
    docker_args=(),
):
    """Run an experiment remotely on the configured AzureML cluster.

    Args:
        project_folder (string): Path of your source files for the experiment.
        entry_script (string): Script filename to run; must exist inside project_folder.
        script_params (dict): Dictionary of script parameters.
        dependencies_file (string, optional): environment.yml describing the training
            environment. Defaults to _DEPENDENCIES_FILE.
        node_count (int, optional): Number of cluster nodes. Defaults to _CLUSTER_MAX_NODES.
        process_count_per_node (int, optional): Processes started on each node; usually
            the GPU count for GPU execution. Defaults to 4.
        wait_for_completion (bool, optional): Block until the experiment finishes.
            Defaults to True.
        docker_args (tuple, optional): Extra Docker arguments. Defaults to ().

    Returns:
        azureml.core.Run: AzureML Run object.
    """
    self._logger.debug(script_params)
    # Resolve datastore references before handing the params to _submit.
    resolved_params = self._complete_datastore(script_params)
    self._logger.debug("Transformed script params")
    self._logger.debug(resolved_params)
    return self._submit(
        dependencies_file,
        project_folder,
        entry_script,
        self.cluster,
        resolved_params,
        node_count,
        process_count_per_node,
        docker_args,
        wait_for_completion,
    )
示例8: get_or_create_workspace
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def get_or_create_workspace(
    config_path="./.azureml", subscription_id=None, resource_group=None, workspace_name=None, workspace_region=None,
):
    """
    Method to get or create workspace.

    Args:
        config_path: optional path to a config.json file, or to the directory
            holding one (defaults to ./.azureml)
        subscription_id: Azure subscription id
        resource_group: Azure resource group to create workspace and related resources
        workspace_name: name of azure ml workspace
        workspace_region: region for workspace

    Returns:
        obj: AzureML workspace if one exists already with the name otherwise creates a new one.
    """
    # "." is a sentinel that os.path.isfile() rejects, so a missing config
    # falls through to Workspace.get below.
    config_file_path = "."
    if config_path is not None:
        if os.path.basename(config_path) == "config.json":
            # BUGFIX: config_path already names the file itself; previously this
            # case left the "." sentinel in place and the config file was ignored.
            config_file_path = config_path
        else:
            config_file_path = os.path.join(config_path, "config.json")
    try:
        # Get existing azure ml workspace
        if os.path.isfile(config_file_path):
            ws = Workspace.from_config(config_file_path, auth=get_auth())
        else:
            ws = Workspace.get(
                name=workspace_name, subscription_id=subscription_id, resource_group=resource_group, auth=get_auth(),
            )
    except ProjectSystemException:
        # This call might take a minute or two.
        print("Creating new workspace")
        ws = Workspace.create(
            name=workspace_name,
            subscription_id=subscription_id,
            resource_group=resource_group,
            create_resource_group=True,
            location=workspace_region,
            auth=get_auth(),
        )
        # Persist the new workspace's config so later calls can use from_config.
        ws.write_config(path=config_path)
    return ws
示例9: get_or_create_amlcompute
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def get_or_create_amlcompute(
    workspace, compute_name, vm_size="", min_nodes=0, max_nodes=None, idle_seconds_before_scaledown=None, verbose=False,
):
    """
    Get or create AmlCompute as the compute target. If a cluster of the same name is found,
    attach it and rescale accordingly. Otherwise, create a new cluster.

    Args:
        workspace (Workspace): workspace
        compute_name (str): name
        vm_size (str, optional): vm size
        min_nodes (int, optional): minimum number of nodes in cluster
        max_nodes (None, optional): maximum number of nodes in cluster
        idle_seconds_before_scaledown (None, optional): how long to wait before the cluster
            autoscales down
        verbose (bool, optional): if true, print logs

    Returns:
        Compute target
    """
    try:
        # Raises ComputeTargetException when no target of this name exists.
        compute_target = ComputeTarget(workspace=workspace, name=compute_name)
        if verbose:
            # BUGFIX: report "found" only after the lookup actually succeeds;
            # previously this printed even when creation was about to happen.
            print("Found compute target: {}".format(compute_name))
        # BUGFIX: guard against max_nodes=None (the default), which previously
        # raised TypeError on the `<` comparison in Python 3.
        if max_nodes is not None and len(compute_target.list_nodes()) < max_nodes:
            if verbose:
                print("Rescaling to {} nodes".format(max_nodes))
            compute_target.update(max_nodes=max_nodes)
            compute_target.wait_for_completion(show_output=verbose)
    except ComputeTargetException:
        if verbose:
            print("Creating new compute target: {}".format(compute_name))
        compute_config = AmlCompute.provisioning_configuration(
            vm_size=vm_size,
            min_nodes=min_nodes,
            max_nodes=max_nodes,
            idle_seconds_before_scaledown=idle_seconds_before_scaledown,
        )
        compute_target = ComputeTarget.create(workspace, compute_name, compute_config)
        compute_target.wait_for_completion(show_output=verbose)
    return compute_target
示例10: get_or_create_workspace
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def get_or_create_workspace(
    config_path="./.azureml",
    subscription_id=None,
    resource_group=None,
    workspace_name=None,
    workspace_region=None,
):
    """
    Method to get or create workspace.

    Args:
        config_path: optional path to a config.json file, or to the directory
            holding one (defaults to ./.azureml)
        subscription_id: Azure subscription id
        resource_group: Azure resource group to create workspace and related resources
        workspace_name: name of azure ml workspace
        workspace_region: region for workspace

    Returns:
        obj: AzureML workspace if one exists already with the name otherwise creates a new one.
    """
    # "." is a sentinel that os.path.isfile() rejects, so a missing config
    # falls through to Workspace.get below.
    config_file_path = "."
    if config_path is not None:
        if os.path.basename(config_path) == "config.json":
            # BUGFIX: config_path already names the file itself; previously this
            # case left the "." sentinel in place and the config file was ignored.
            config_file_path = config_path
        else:
            config_file_path = os.path.join(config_path, "config.json")
    try:
        # get existing azure ml workspace
        if os.path.isfile(config_file_path):
            ws = Workspace.from_config(config_file_path, auth=get_auth())
        else:
            ws = Workspace.get(
                name=workspace_name,
                subscription_id=subscription_id,
                resource_group=resource_group,
                auth=get_auth(),
            )
    except ProjectSystemException:
        # this call might take a minute or two.
        print("Creating new workspace")
        ws = Workspace.create(
            name=workspace_name,
            subscription_id=subscription_id,
            resource_group=resource_group,
            create_resource_group=True,
            location=workspace_region,
            auth=get_auth(),
        )
        # persist the new workspace's config so later calls can use from_config.
        ws.write_config(path=config_path)
    return ws
示例11: get_or_create_amlcompute
# 需要导入模块: from azureml.core.compute import ComputeTarget [as 别名]
# 或者: from azureml.core.compute.ComputeTarget import create [as 别名]
def get_or_create_amlcompute(
    workspace,
    compute_name,
    vm_size="",
    min_nodes=0,
    max_nodes=None,
    idle_seconds_before_scaledown=None,
    verbose=False,
):
    """
    Get or create AmlCompute as the compute target. If a cluster of the same name is found,
    attach it and rescale accordingly. Otherwise, create a new cluster.

    Args:
        workspace (Workspace): workspace
        compute_name (str): name
        vm_size (str, optional): vm size
        min_nodes (int, optional): minimum number of nodes in cluster
        max_nodes (None, optional): maximum number of nodes in cluster
        idle_seconds_before_scaledown (None, optional): how long to wait before the cluster
            autoscales down
        verbose (bool, optional): if true, print logs

    Returns:
        Compute target
    """
    try:
        # Raises ComputeTargetException when no target of this name exists.
        compute_target = ComputeTarget(workspace=workspace, name=compute_name)
        if verbose:
            # BUGFIX: report "found" only after the lookup actually succeeds;
            # previously this printed even when creation was about to happen.
            print("Found compute target: {}".format(compute_name))
        # BUGFIX: guard against max_nodes=None (the default), which previously
        # raised TypeError on the `<` comparison in Python 3.
        if max_nodes is not None and len(compute_target.list_nodes()) < max_nodes:
            if verbose:
                print("Rescaling to {} nodes".format(max_nodes))
            compute_target.update(max_nodes=max_nodes)
            compute_target.wait_for_completion(show_output=verbose)
    except ComputeTargetException:
        if verbose:
            print("Creating new compute target: {}".format(compute_name))
        compute_config = AmlCompute.provisioning_configuration(
            vm_size=vm_size,
            min_nodes=min_nodes,
            max_nodes=max_nodes,
            idle_seconds_before_scaledown=idle_seconds_before_scaledown,
        )
        compute_target = ComputeTarget.create(workspace, compute_name, compute_config)
        compute_target.wait_for_completion(show_output=verbose)
    return compute_target