This article collects typical usage examples of the Python method sagemaker.utils.unique_name_from_base. If you are wondering exactly what utils.unique_name_from_base does and how to use it, the curated code examples below should help; you can also read further about the module it belongs to, sagemaker.utils.

Below are 15 code examples of utils.unique_name_from_base, ordered by popularity by default.
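Before the examples, here is a minimal sketch (not part of the original article) of what unique_name_from_base does on its own: it appends a time-derived suffix to the base string you pass in, so that repeated training jobs, endpoints, and transform jobs get distinct names. The exact suffix format shown in the comment below is an assumption and may differ between SageMaker SDK versions.

# Minimal sketch, assuming the sagemaker package is installed.
from sagemaker.utils import unique_name_from_base

# Appends a unique, timestamp-based suffix to the base name; the base is
# truncated if necessary so the result stays within the allowed length.
job_name = unique_name_from_base('test-pytorch-mnist')
print(job_name)  # e.g. 'test-pytorch-mnist-1234567890-a1b2' (suffix format may vary by SDK version)

# max_length caps the total length of the generated name, which is useful
# for endpoint names with stricter length limits.
endpoint_name = unique_name_from_base('test-mxnet-serving', max_length=32)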

Example 1: _test_mnist_distributed

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def _test_mnist_distributed(sagemaker_session, image_uri, instance_type, dist_backend):
    with timeout(minutes=DEFAULT_TIMEOUT):
        pytorch = PyTorch(entry_point=mnist_script,
                          role='SageMakerRole',
                          train_instance_count=2,
                          train_instance_type=instance_type,
                          sagemaker_session=sagemaker_session,
                          image_name=image_uri,
                          debugger_hook_config=False,
                          hyperparameters={'backend': dist_backend, 'epochs': 2})

        training_input = pytorch.sagemaker_session.upload_data(path=training_dir,
                                                               key_prefix='pytorch/mnist')

        job_name = utils.unique_name_from_base('test-pytorch-mnist')
        pytorch.fit({'training': training_input}, job_name=job_name)

Example 2: test_mnist_gpu

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_mnist_gpu(sagemaker_session, image_uri, dist_gpu_backend):
    with timeout(minutes=DEFAULT_TIMEOUT):
        pytorch = PyTorch(entry_point=mnist_script,
                          role='SageMakerRole',
                          train_instance_count=2,
                          image_name=image_uri,
                          train_instance_type=MULTI_GPU_INSTANCE,
                          sagemaker_session=sagemaker_session,
                          debugger_hook_config=False,
                          hyperparameters={'backend': dist_gpu_backend})

        training_input = sagemaker_session.upload_data(path=os.path.join(data_dir, 'training'),
                                                       key_prefix='pytorch/mnist')

        job_name = utils.unique_name_from_base('test-pytorch-dist-ops')
        pytorch.fit({'training': training_input}, job_name=job_name)

Example 3: _test_dist_operations

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def _test_dist_operations(sagemaker_session, image_uri, instance_type, dist_backend, train_instance_count=3):
    with timeout(minutes=DEFAULT_TIMEOUT):
        pytorch = PyTorch(entry_point=dist_operations_path,
                          role='SageMakerRole',
                          train_instance_count=train_instance_count,
                          train_instance_type=instance_type,
                          sagemaker_session=sagemaker_session,
                          image_name=image_uri,
                          debugger_hook_config=False,
                          hyperparameters={'backend': dist_backend})

        pytorch.sagemaker_session.default_bucket()
        fake_input = pytorch.sagemaker_session.upload_data(path=dist_operations_path,
                                                           key_prefix='pytorch/distributed_operations')

        job_name = utils.unique_name_from_base('test-pytorch-dist-ops')
        pytorch.fit({'required_argument': fake_input}, job_name=job_name)

Example 4: test_hosting

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_hosting(sagemaker_session, image_uri, instance_type, framework_version):
    prefix = 'mxnet-serving/default-handlers'
    model_data = sagemaker_session.upload_data(path=MODEL_PATH, key_prefix=prefix)
    model = MXNetModel(model_data,
                       'SageMakerRole',
                       SCRIPT_PATH,
                       image=image_uri,
                       framework_version=framework_version,
                       sagemaker_session=sagemaker_session)

    endpoint_name = utils.unique_name_from_base('test-mxnet-serving')
    with timeout.timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = model.deploy(1, instance_type, endpoint_name=endpoint_name)

        output = predictor.predict([[1, 2]])
        assert [[4.9999918937683105]] == output

Example 5: test_batch_transform

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_batch_transform(sagemaker_session, image_uri, instance_type, framework_version):
    s3_prefix = 'mxnet-serving/mnist'
    model_data = sagemaker_session.upload_data(path=MODEL_PATH, key_prefix=s3_prefix)
    model = MXNetModel(model_data,
                       'SageMakerRole',
                       SCRIPT_PATH,
                       image=image_uri,
                       framework_version=framework_version,
                       sagemaker_session=sagemaker_session)

    transformer = model.transformer(1, instance_type)

    with timeout.timeout_and_delete_model_with_transformer(transformer, sagemaker_session, minutes=20):
        input_data = sagemaker_session.upload_data(path=DATA_PATH, key_prefix=s3_prefix)

        job_name = utils.unique_name_from_base('test-mxnet-serving-batch')
        transformer.transform(input_data, content_type='text/csv', job_name=job_name)
        transformer.wait()

    prediction = _transform_result(sagemaker_session.boto_session, transformer.output_path)
    assert prediction == 7

Example 6: test_elastic_inference

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_elastic_inference(image_uri, sagemaker_session, instance_type, accelerator_type, framework_version):
    endpoint_name = utils.unique_name_from_base('test-mxnet-ei')

    with timeout_and_delete_endpoint_by_name(endpoint_name=endpoint_name,
                                             sagemaker_session=sagemaker_session,
                                             minutes=20):
        prefix = 'mxnet-serving/default-handlers'
        model_data = sagemaker_session.upload_data(path=MODEL_PATH, key_prefix=prefix)
        model = MXNetModel(model_data=model_data,
                           entry_point=SCRIPT_PATH,
                           role='SageMakerRole',
                           image=image_uri,
                           framework_version=framework_version,
                           sagemaker_session=sagemaker_session)

        predictor = model.deploy(initial_instance_count=1,
                                 instance_type=instance_type,
                                 accelerator_type=accelerator_type,
                                 endpoint_name=endpoint_name)

        output = predictor.predict([[1, 2]])
        assert [[4.9999918937683105]] == output

Example 7: test_mnist_distributed

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_mnist_distributed(sagemaker_session, instance_type, tf_full_version, py_version):
    estimator = TensorFlow(
        entry_point=SCRIPT,
        role=ROLE,
        train_instance_count=2,
        train_instance_type=instance_type,
        sagemaker_session=sagemaker_session,
        py_version="py37",
        script_mode=True,
        framework_version=tf_full_version,
        distributions=PARAMETER_SERVER_DISTRIBUTION,
    )
    inputs = estimator.sagemaker_session.upload_data(
        path=os.path.join(MNIST_RESOURCE_PATH, "data"), key_prefix="scriptmode/distributed_mnist"
    )

    with tests.integ.timeout.timeout(minutes=tests.integ.TRAINING_DEFAULT_TIMEOUT_MINUTES):
        estimator.fit(inputs=inputs, job_name=unique_name_from_base("test-tf-sm-distributed"))
    assert_s3_files_exist(
        sagemaker_session,
        estimator.model_dir,
        ["graph.pbtxt", "model.ckpt-0.index", "model.ckpt-0.meta"],
    )

Example 8: predictor

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def predictor(sagemaker_session, tf_serving_version):
    endpoint_name = unique_name_from_base("sagemaker-tensorflow-serving")
    model_data = sagemaker_session.upload_data(
        path=os.path.join(tests.integ.DATA_DIR, "tensorflow-serving-test-model.tar.gz"),
        key_prefix="tensorflow-serving/models",
    )
    with tests.integ.timeout.timeout_and_delete_endpoint_by_name(
        endpoint_name=endpoint_name, sagemaker_session=sagemaker_session, hours=2
    ):
        model = Model(
            model_data=model_data,
            role=ROLE,
            framework_version=tf_serving_version,
            sagemaker_session=sagemaker_session,
        )
        predictor = model.deploy(
            INSTANCE_COUNT,
            INSTANCE_TYPE,
            endpoint_name=endpoint_name,
            data_capture_config=DataCaptureConfig(True, sagemaker_session=sagemaker_session),
        )

        yield predictor

Example 9: test_coach_mxnet

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_coach_mxnet(sagemaker_session, rl_coach_mxnet_full_version, cpu_instance_type):
    estimator = _test_coach(
        sagemaker_session, RLFramework.MXNET, rl_coach_mxnet_full_version, cpu_instance_type
    )
    job_name = unique_name_from_base("test-coach-mxnet")

    with timeout(minutes=15):
        estimator.fit(wait="False", job_name=job_name)

        estimator = RLEstimator.attach(
            estimator.latest_training_job.name, sagemaker_session=sagemaker_session
        )

    endpoint_name = "test-mxnet-coach-deploy-{}".format(sagemaker_timestamp())

    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = estimator.deploy(
            1, cpu_instance_type, entry_point="mxnet_deploy.py", endpoint_name=endpoint_name
        )
        observation = numpy.asarray([0, 0, 0, 0])
        action = predictor.predict(observation)

        assert 0 < action[0][0] < 1
        assert 0 < action[0][1] < 1

Example 10: test_coach_tf

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_coach_tf(sagemaker_session, rl_coach_tf_full_version, cpu_instance_type):
    estimator = _test_coach(
        sagemaker_session, RLFramework.TENSORFLOW, rl_coach_tf_full_version, cpu_instance_type
    )
    job_name = unique_name_from_base("test-coach-tf")

    with timeout(minutes=15):
        estimator.fit(job_name=job_name)

    endpoint_name = "test-tf-coach-deploy-{}".format(sagemaker_timestamp())

    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = estimator.deploy(1, cpu_instance_type)
        observation = numpy.asarray([0, 0, 0, 0])
        action = predictor.predict(observation)

        assert action == {"predictions": [[0.5, 0.5]]}

Example 11: test_ray_tf

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_ray_tf(sagemaker_session, rl_ray_full_version, cpu_instance_type):
    source_dir = os.path.join(DATA_DIR, "ray_cartpole")
    cartpole = "train_ray.py"

    estimator = RLEstimator(
        entry_point=cartpole,
        source_dir=source_dir,
        toolkit=RLToolkit.RAY,
        framework=RLFramework.TENSORFLOW,
        toolkit_version=rl_ray_full_version,
        sagemaker_session=sagemaker_session,
        role="SageMakerRole",
        train_instance_type=cpu_instance_type,
        train_instance_count=1,
    )
    job_name = unique_name_from_base("test-ray-tf")

    with timeout(minutes=15):
        estimator.fit(job_name=job_name)

    with pytest.raises(NotImplementedError) as e:
        estimator.deploy(1, cpu_instance_type)
    assert "Automatic deployment of Ray models is not currently available" in str(e.value)

Example 12: test_failed_training_job

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_failed_training_job(sagemaker_session, sklearn_full_version, cpu_instance_type):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, "sklearn_mnist", "failure_script.py")
        data_path = os.path.join(DATA_DIR, "sklearn_mnist")

        sklearn = SKLearn(
            entry_point=script_path,
            role="SageMakerRole",
            framework_version=sklearn_full_version,
            py_version=PYTHON_VERSION,
            train_instance_count=1,
            train_instance_type=cpu_instance_type,
            sagemaker_session=sagemaker_session,
        )

        train_input = sklearn.sagemaker_session.upload_data(
            path=os.path.join(data_path, "train"), key_prefix="integ-test-data/sklearn_mnist/train"
        )
        job_name = unique_name_from_base("test-sklearn-failed")

        with pytest.raises(ValueError):
            sklearn.fit(train_input, job_name=job_name)

Example 13: mxnet_estimator

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def mxnet_estimator(sagemaker_session, mxnet_full_version, cpu_instance_type):
    mx = MXNet(
        entry_point=os.path.join(MXNET_MNIST_PATH, "mnist.py"),
        role="SageMakerRole",
        train_instance_count=1,
        train_instance_type=cpu_instance_type,
        sagemaker_session=sagemaker_session,
        framework_version=mxnet_full_version,
    )

    train_input = mx.sagemaker_session.upload_data(
        path=os.path.join(MXNET_MNIST_PATH, "train"), key_prefix="integ-test-data/mxnet_mnist/train"
    )
    test_input = mx.sagemaker_session.upload_data(
        path=os.path.join(MXNET_MNIST_PATH, "test"), key_prefix="integ-test-data/mxnet_mnist/test"
    )

    job_name = unique_name_from_base("test-mxnet-transform")
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        mx.fit({"train": train_input, "test": test_input}, job_name=job_name)

    return mx

Example 14: test_single_transformer_multiple_jobs

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def test_single_transformer_multiple_jobs(
    mxnet_estimator, mxnet_transform_input, sagemaker_session, cpu_instance_type
):
    transformer = mxnet_estimator.transformer(1, cpu_instance_type)

    job_name = unique_name_from_base("test-mxnet-transform")
    transformer.transform(mxnet_transform_input, content_type="text/csv", job_name=job_name)
    with timeout_and_delete_model_with_transformer(
        transformer, sagemaker_session, minutes=TRANSFORM_DEFAULT_TIMEOUT_MINUTES
    ):
        assert transformer.output_path == "s3://{}/{}".format(
            sagemaker_session.default_bucket(), job_name
        )

        job_name = unique_name_from_base("test-mxnet-transform")
        transformer.transform(mxnet_transform_input, content_type="text/csv", job_name=job_name)
        assert transformer.output_path == "s3://{}/{}".format(
            sagemaker_session.default_bucket(), job_name
        )

Example 15: _create_transformer_and_transform_job

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import unique_name_from_base [as alias]
def _create_transformer_and_transform_job(
    estimator,
    transform_input,
    instance_type,
    volume_kms_key=None,
    input_filter=None,
    output_filter=None,
    join_source=None,
    wait=False,
    logs=False,
):
    transformer = estimator.transformer(1, instance_type, volume_kms_key=volume_kms_key)

    transformer.transform(
        transform_input,
        content_type="text/csv",
        input_filter=input_filter,
        output_filter=output_filter,
        join_source=join_source,
        wait=wait,
        logs=logs,
        job_name=unique_name_from_base("test-transform"),
    )

    return transformer