This article collects typical usage examples of the Python method sagemaker.tensorflow.TensorFlow.attach. If you have been wondering what TensorFlow.attach does, how to call it, or what working examples look like, the curated code samples below may help. You can also browse further usage examples of the class the method belongs to, sagemaker.tensorflow.TensorFlow.
The following shows 4 code examples of TensorFlow.attach, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
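Before diving into the test examples, here is a minimal orientation sketch of the usual attach-then-deploy pattern. It is not one of the examples below; the job name 'my-training-job' and the instance type are placeholder assumptions.

from sagemaker.tensorflow import TensorFlow

# Attach to a training job that was started earlier, possibly from another process.
# 'my-training-job' is a placeholder; substitute your own job name.
estimator = TensorFlow.attach(training_job_name='my-training-job')

# attach() rebuilds the estimator's configuration from the job description
# (and waits for the job to finish if it is still running), so the usual
# deploy/predict calls work as if the estimator had just been trained.
predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge')
print(predictor.predict([6.4, 3.2, 4.5, 1.5]))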
Example 1: test_tf_async
# Required import: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import attach [as alias]
def test_tf_async(sagemaker_session):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               base_job_name='test-tf')

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf_iris')

        # Start the training job asynchronously and remember its name.
        estimator.fit(inputs, wait=False)
        training_job_name = estimator.latest_training_job.name
        time.sleep(20)

    endpoint_name = training_job_name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        # Re-attach to the job by name, then deploy the resulting model.
        estimator = TensorFlow.attach(training_job_name=training_job_name, sagemaker_session=sagemaker_session)
        json_predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge',
                                          endpoint_name=endpoint_name)

        result = json_predictor.predict([6.4, 3.2, 4.5, 1.5])
        print('predict result: {}'.format(result))
Example 2: test_attach_custom_image
# Required import: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import attach [as alias]
def test_attach_custom_image(sagemaker_session):
    training_image = '1.dkr.ecr.us-west-2.amazonaws.com/tensorflow_with_custom_binary:1.0'

    # Minimal DescribeTrainingJob response used to mock the SageMaker API.
    rjd = {
        'AlgorithmSpecification': {
            'TrainingInputMode': 'File',
            'TrainingImage': training_image},
        'HyperParameters': {
            'sagemaker_submit_directory': '"s3://some/sourcedir.tar.gz"',
            'checkpoint_path': '"s3://other/1508872349"',
            'sagemaker_program': '"iris-dnn-classifier.py"',
            'sagemaker_enable_cloudwatch_metrics': 'false',
            'sagemaker_container_log_level': '"logging.INFO"',
            'sagemaker_job_name': '"neo"',
            'training_steps': '100',
            'evaluation_steps': '10'},
        'RoleArn': 'arn:aws:iam::366:role/SageMakerRole',
        'ResourceConfig': {
            'VolumeSizeInGB': 30,
            'InstanceCount': 1,
            'InstanceType': 'ml.c4.xlarge'},
        'StoppingCondition': {'MaxRuntimeInSeconds': 24 * 60 * 60},
        'TrainingJobName': 'neo',
        'TrainingJobStatus': 'Completed',
        'OutputDataConfig': {'KmsKeyId': '', 'S3OutputPath': 's3://place/output/neo'},
        'TrainingJobOutput': {'S3TrainingJobOutput': 's3://here/output.tar.gz'}}

    sagemaker_session.sagemaker_client.describe_training_job = Mock(name='describe_training_job', return_value=rjd)

    # The attached estimator should keep using the custom training image.
    estimator = TensorFlow.attach(training_job_name='neo', sagemaker_session=sagemaker_session)
    assert estimator.image_name == training_image
    assert estimator.train_image() == training_image
Example 3: test_attach_wrong_framework
# Required import: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import attach [as alias]
def test_attach_wrong_framework(sagemaker_session):
    # The job description points at an MXNet image, so attaching as TensorFlow must fail.
    returned_job_description = {
        'AlgorithmSpecification': {
            'TrainingInputMode': 'File',
            'TrainingImage': '1.dkr.ecr.us-west-2.amazonaws.com/sagemaker-mxnet-py2-cpu:1.0'
        },
        'HyperParameters': {
            'sagemaker_submit_directory': '"s3://some/sourcedir.tar.gz"',
            'sagemaker_program': '"iris-dnn-classifier.py"',
            'sagemaker_enable_cloudwatch_metrics': 'false',
            'sagemaker_container_log_level': '"logging.INFO"',
            'training_steps': '100'
        },
        'RoleArn': 'arn:aws:iam::366:role/SageMakerRole',
        'ResourceConfig': {
            'VolumeSizeInGB': 30,
            'InstanceCount': 1,
            'InstanceType': 'ml.c4.xlarge'
        },
        'StoppingCondition': {
            'MaxRuntimeInSeconds': 24 * 60 * 60
        },
        'TrainingJobName': 'neo',
        'TrainingJobStatus': 'Completed',
        'OutputDataConfig': {
            'KmsKeyId': '',
            'S3OutputPath': 's3://place/output/neo'
        },
        'TrainingJobOutput': {
            'S3TrainingJobOutput': 's3://here/output.tar.gz'
        }
    }
    sagemaker_session.sagemaker_client.describe_training_job = Mock(name='describe_training_job',
                                                                    return_value=returned_job_description)

    with pytest.raises(ValueError) as error:
        TensorFlow.attach(training_job_name='neo', sagemaker_session=sagemaker_session)
    assert "didn't use image for requested framework" in str(error)
Example 4: test_attach
# Required import: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import attach [as alias]
def test_attach(sagemaker_session, tf_version):
    training_image = '1.dkr.ecr.us-west-2.amazonaws.com/sagemaker-tensorflow-py2-cpu:{}-cpu-py2'.format(tf_version)

    # Minimal DescribeTrainingJob response used to mock the SageMaker API.
    rjd = {
        'AlgorithmSpecification': {
            'TrainingInputMode': 'File',
            'TrainingImage': training_image
        },
        'HyperParameters': {
            'sagemaker_submit_directory': '"s3://some/sourcedir.tar.gz"',
            'checkpoint_path': '"s3://other/1508872349"',
            'sagemaker_program': '"iris-dnn-classifier.py"',
            'sagemaker_enable_cloudwatch_metrics': 'false',
            'sagemaker_container_log_level': '"logging.INFO"',
            'sagemaker_job_name': '"neo"',
            'training_steps': '100',
            'evaluation_steps': '10'
        },
        'RoleArn': 'arn:aws:iam::366:role/SageMakerRole',
        'ResourceConfig': {
            'VolumeSizeInGB': 30,
            'InstanceCount': 1,
            'InstanceType': 'ml.c4.xlarge'
        },
        'StoppingCondition': {'MaxRuntimeInSeconds': 24 * 60 * 60},
        'TrainingJobName': 'neo',
        'TrainingJobStatus': 'Completed',
        'OutputDataConfig': {'KmsKeyId': '', 'S3OutputPath': 's3://place/output/neo'},
        'TrainingJobOutput': {'S3TrainingJobOutput': 's3://here/output.tar.gz'}}

    sagemaker_session.sagemaker_client.describe_training_job = Mock(name='describe_training_job', return_value=rjd)

    # attach() should reconstruct every estimator attribute from the job description.
    estimator = TensorFlow.attach(training_job_name='neo', sagemaker_session=sagemaker_session)
    assert estimator.latest_training_job.job_name == 'neo'
    assert estimator.py_version == 'py2'
    assert estimator.framework_version == tf_version
    assert estimator.role == 'arn:aws:iam::366:role/SageMakerRole'
    assert estimator.train_instance_count == 1
    assert estimator.train_max_run == 24 * 60 * 60
    assert estimator.input_mode == 'File'
    assert estimator.training_steps == 100
    assert estimator.evaluation_steps == 10
    assert estimator.base_job_name == 'neo'
    assert estimator.output_path == 's3://place/output/neo'
    assert estimator.output_kms_key == ''
    assert estimator.hyperparameters()['training_steps'] == '100'
    assert estimator.source_dir == 's3://some/sourcedir.tar.gz'
    assert estimator.entry_point == 'iris-dnn-classifier.py'
    assert estimator.checkpoint_path == 's3://other/1508872349'