This article collects typical usage examples of the Python class sagemaker.tensorflow.TensorFlow. If you have been wondering what the TensorFlow class is for, or how it is used in real code, the curated examples below should help.
In total, 15 code examples of the TensorFlow class are shown, sorted by popularity by default.
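The examples below revolve around one basic workflow: construct a TensorFlow estimator, call fit() on training data in S3, and optionally deploy() the trained model to an endpoint. As a quick orientation, here is a minimal sketch of that workflow; the script name, role, and S3 URIs are hypothetical placeholders, not values from the examples:

from sagemaker.tensorflow import TensorFlow

# All literals below are illustrative placeholders.
estimator = TensorFlow(entry_point='train.py',             # training script run inside the TF container
                       role='SageMakerRole',               # IAM role assumed by the training job
                       training_steps=1000,
                       evaluation_steps=10,
                       train_instance_count=1,
                       train_instance_type='ml.c4.xlarge')

estimator.fit('s3://my-bucket/training-data')              # blocks until the training job finishes

# Serve the trained model behind a real-time endpoint and query it.
predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge')
print(predictor.predict([6.4, 3.2, 4.5, 1.5]))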
Example 1: test_tf
def test_tf(sagemaker_session, tf_full_version):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               base_job_name='test-tf')

        inputs = sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf_iris')
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        json_predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge',
                                          endpoint_name=endpoint_name)

        features = [6.4, 3.2, 4.5, 1.5]
        dict_result = json_predictor.predict({'inputs': features})
        print('predict result: {}'.format(dict_result))
        list_result = json_predictor.predict(features)
        print('predict result: {}'.format(list_result))

        assert dict_result == list_result
Example 2: test_tf_async
def test_tf_async(sagemaker_session):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               base_job_name='test-tf')

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf_iris')
        estimator.fit(inputs, wait=False)  # wait=False returns immediately; the job runs in the background
        training_job_name = estimator.latest_training_job.name
        time.sleep(20)

    endpoint_name = training_job_name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        # Re-attach to the detached training job before deploying.
        estimator = TensorFlow.attach(training_job_name=training_job_name, sagemaker_session=sagemaker_session)
        json_predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge',
                                          endpoint_name=endpoint_name)

        result = json_predictor.predict([6.4, 3.2, 4.5, 1.5])
        print('predict result: {}'.format(result))
Example 3: test_failed_tf_training
def test_failed_tf_training(sagemaker_session, tf_full_version):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'failure_script.py')
        ec2_client = sagemaker_session.boto_session.client('ec2')
        subnet, security_group_id = get_or_create_subnet_and_security_group(ec2_client, VPC_NAME)

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               subnets=[subnet],
                               security_group_ids=[security_group_id])

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf-failure')

        with pytest.raises(ValueError) as e:
            estimator.fit(inputs)
        assert 'This failure is expected' in str(e.value)

        job_desc = estimator.sagemaker_session.sagemaker_client.describe_training_job(
            TrainingJobName=estimator.latest_training_job.name)
        assert [subnet] == job_desc['VpcConfig']['Subnets']
        assert [security_group_id] == job_desc['VpcConfig']['SecurityGroupIds']
Example 4: test_cifar
def test_cifar(sagemaker_session, tf_full_version):
    with timeout(minutes=45):
        script_path = os.path.join(DATA_DIR, 'cifar_10', 'source')
        dataset_path = os.path.join(DATA_DIR, 'cifar_10', 'data')

        estimator = TensorFlow(entry_point='resnet_cifar_10.py', source_dir=script_path, role='SageMakerRole',
                               framework_version=tf_full_version, training_steps=500, evaluation_steps=5,
                               train_instance_count=2, train_instance_type='ml.p2.xlarge',
                               sagemaker_session=sagemaker_session, train_max_run=45 * 60,
                               base_job_name='test-cifar')

        inputs = estimator.sagemaker_session.upload_data(path=dataset_path, key_prefix='data/cifar10')
        estimator.fit(inputs, logs=False)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.p2.xlarge')
        predictor.serializer = PickleSerializer()
        predictor.content_type = PICKLE_CONTENT_TYPE

        data = np.random.randn(32, 32, 3)
        predict_response = predictor.predict(data)
        assert len(predict_response['outputs']['probabilities']['floatVal']) == 10
Example 5: test_train_image_default
def test_train_image_default(sagemaker_session):
    tf = TensorFlow(entry_point=SCRIPT_PATH,
                    role=ROLE,
                    sagemaker_session=sagemaker_session,
                    train_instance_count=INSTANCE_COUNT,
                    train_instance_type=INSTANCE_TYPE)

    assert _get_full_cpu_image_uri(defaults.TF_VERSION) in tf.train_image()
Example 6: test_run_tensorboard_locally_without_awscli_binary
def test_run_tensorboard_locally_without_awscli_binary(time, strftime, popen, call, access, sagemaker_session):
    # time, strftime, popen, call and access are mock objects injected by patch
    # decorators that are not shown in this listing.
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    train_instance_count=INSTANCE_COUNT, train_instance_type=INSTANCE_TYPE)

    with pytest.raises(EnvironmentError) as error:
        tf.fit(inputs='s3://mybucket/train', run_tensorboard_locally=True)
    assert str(error.value) == 'The AWS CLI is not installed in the system. Please install the AWS CLI using the ' \
                               'following command: \n pip install awscli'
Example 7: test_run_tensorboard_locally
def test_run_tensorboard_locally(sleep, time, strftime, popen, call, access, rmtree, mkdtemp, sync, sagemaker_session):
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    train_instance_count=INSTANCE_COUNT, train_instance_type=INSTANCE_TYPE)

    popen().poll.return_value = None

    tf.fit(inputs='s3://mybucket/train', run_tensorboard_locally=True)

    # stderr=-1 and stdout=-1 correspond to subprocess.PIPE.
    popen.assert_called_with(['tensorboard', '--logdir', '/my/temp/folder', '--host', 'localhost', '--port', '6006'],
                             stderr=-1,
                             stdout=-1)
Example 8: test_run_tensorboard_locally_port_in_use
def test_run_tensorboard_locally_port_in_use(time, strftime, popen, call, access, socket, rmtree, mkdtemp, sync,
                                             sagemaker_session):
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    train_instance_count=INSTANCE_COUNT, train_instance_type=INSTANCE_TYPE)

    # The first poll() reports port 6006 as taken, so TensorBoard falls back to 6007.
    popen().poll.side_effect = [-1, None]

    tf.fit(inputs='s3://mybucket/train', run_tensorboard_locally=True)

    popen.assert_any_call(['tensorboard', '--logdir', '/my/temp/folder', '--host', 'localhost', '--port', '6006'],
                          stderr=-1, stdout=-1)
    popen.assert_any_call(['tensorboard', '--logdir', '/my/temp/folder', '--host', 'localhost', '--port', '6007'],
                          stderr=-1, stdout=-1)
Example 9: test_create_model_with_custom_image
def test_create_model_with_custom_image(sagemaker_session):
    container_log_level = '"logging.INFO"'
    source_dir = 's3://mybucket/source'
    custom_image = 'tensorflow:1.0'
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    training_steps=1000, evaluation_steps=10, train_instance_count=INSTANCE_COUNT,
                    train_instance_type=INSTANCE_TYPE, image_name=custom_image,
                    container_log_level=container_log_level, base_job_name='job',
                    source_dir=source_dir)

    job_name = 'doing something'
    tf.fit(inputs='s3://mybucket/train', job_name=job_name)
    model = tf.create_model()

    assert model.image == custom_image
Example 10: test_attach_custom_image
def test_attach_custom_image(sagemaker_session):
    training_image = '1.dkr.ecr.us-west-2.amazonaws.com/tensorflow_with_custom_binary:1.0'
    rjd = {
        'AlgorithmSpecification': {
            'TrainingInputMode': 'File',
            'TrainingImage': training_image},
        'HyperParameters': {
            'sagemaker_submit_directory': '"s3://some/sourcedir.tar.gz"',
            'checkpoint_path': '"s3://other/1508872349"',
            'sagemaker_program': '"iris-dnn-classifier.py"',
            'sagemaker_enable_cloudwatch_metrics': 'false',
            'sagemaker_container_log_level': '"logging.INFO"',
            'sagemaker_job_name': '"neo"',
            'training_steps': '100',
            'evaluation_steps': '10'},
        'RoleArn': 'arn:aws:iam::366:role/SageMakerRole',
        'ResourceConfig': {
            'VolumeSizeInGB': 30,
            'InstanceCount': 1,
            'InstanceType': 'ml.c4.xlarge'},
        'StoppingCondition': {'MaxRuntimeInSeconds': 24 * 60 * 60},
        'TrainingJobName': 'neo',
        'TrainingJobStatus': 'Completed',
        'OutputDataConfig': {'KmsKeyId': '', 'S3OutputPath': 's3://place/output/neo'},
        'TrainingJobOutput': {'S3TrainingJobOutput': 's3://here/output.tar.gz'}}
    sagemaker_session.sagemaker_client.describe_training_job = Mock(name='describe_training_job', return_value=rjd)

    estimator = TensorFlow.attach(training_job_name='neo', sagemaker_session=sagemaker_session)
    assert estimator.image_name == training_image
    assert estimator.train_image() == training_image
Example 11: test_failed_tf_training
def test_failed_tf_training(sagemaker_session, tf_full_version):
    with timeout(minutes=15):
        script_path = os.path.join(DATA_DIR, 'iris', 'failure_script.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session)

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf-failure')

        with pytest.raises(ValueError) as e:
            estimator.fit(inputs)
        assert 'This failure is expected' in str(e.value)
Example 12: test_create_model_with_optional_params
def test_create_model_with_optional_params(sagemaker_session):
    container_log_level = '"logging.INFO"'
    source_dir = 's3://mybucket/source'
    enable_cloudwatch_metrics = 'true'
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    training_steps=1000, evaluation_steps=10, train_instance_count=INSTANCE_COUNT,
                    train_instance_type=INSTANCE_TYPE, container_log_level=container_log_level,
                    base_job_name='job', source_dir=source_dir,
                    enable_cloudwatch_metrics=enable_cloudwatch_metrics)

    job_name = 'doing something'
    tf.fit(inputs='s3://mybucket/train', job_name=job_name)

    new_role = 'role'
    model_server_workers = 2
    model = tf.create_model(role=new_role, model_server_workers=model_server_workers)

    assert model.role == new_role
    assert model.model_server_workers == model_server_workers
Example 13: test_create_model
def test_create_model(sagemaker_session, tf_version):
    container_log_level = '"logging.INFO"'
    source_dir = 's3://mybucket/source'
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    training_steps=1000, evaluation_steps=10, train_instance_count=INSTANCE_COUNT,
                    train_instance_type=INSTANCE_TYPE, framework_version=tf_version,
                    container_log_level=container_log_level, base_job_name='job',
                    source_dir=source_dir)

    job_name = 'doing something'
    tf.fit(inputs='s3://mybucket/train', job_name=job_name)
    model = tf.create_model()

    assert model.sagemaker_session == sagemaker_session
    assert model.framework_version == tf_version
    assert model.py_version == tf.py_version
    assert model.entry_point == SCRIPT_PATH
    assert model.role == ROLE
    assert model.name == job_name
    assert model.container_log_level == container_log_level
    assert model.source_dir == source_dir
Example 14: test_attach_wrong_framework
def test_attach_wrong_framework(sagemaker_session):
    returned_job_description = {
        'AlgorithmSpecification': {
            'TrainingInputMode': 'File',
            'TrainingImage': '1.dkr.ecr.us-west-2.amazonaws.com/sagemaker-mxnet-py2-cpu:1.0'},
        'HyperParameters': {
            'sagemaker_submit_directory': '"s3://some/sourcedir.tar.gz"',
            'sagemaker_program': '"iris-dnn-classifier.py"',
            'sagemaker_enable_cloudwatch_metrics': 'false',
            'sagemaker_container_log_level': '"logging.INFO"',
            'training_steps': '100'},
        'RoleArn': 'arn:aws:iam::366:role/SageMakerRole',
        'ResourceConfig': {
            'VolumeSizeInGB': 30,
            'InstanceCount': 1,
            'InstanceType': 'ml.c4.xlarge'},
        'StoppingCondition': {'MaxRuntimeInSeconds': 24 * 60 * 60},
        'TrainingJobName': 'neo',
        'TrainingJobStatus': 'Completed',
        'OutputDataConfig': {'KmsKeyId': '', 'S3OutputPath': 's3://place/output/neo'},
        'TrainingJobOutput': {'S3TrainingJobOutput': 's3://here/output.tar.gz'}}
    sagemaker_session.sagemaker_client.describe_training_job = Mock(name='describe_training_job',
                                                                    return_value=returned_job_description)

    with pytest.raises(ValueError) as error:
        TensorFlow.attach(training_job_name='neo', sagemaker_session=sagemaker_session)
    assert "didn't use image for requested framework" in str(error)
Example 15: test_tf
def test_tf(m_tar, e_tar, time, strftime, sagemaker_session, tf_version):
    tf = TensorFlow(entry_point=SCRIPT_FILE, role=ROLE, sagemaker_session=sagemaker_session, training_steps=1000,
                    evaluation_steps=10, train_instance_count=INSTANCE_COUNT, train_instance_type=INSTANCE_TYPE,
                    framework_version=tf_version, requirements_file=REQUIREMENTS_FILE, source_dir=DATA_DIR)

    inputs = 's3://mybucket/train'
    s3_prefix = 's3://{}/{}/source/sourcedir.tar.gz'.format(BUCKET_NAME, JOB_NAME)
    e_tar.return_value = UploadedCode(s3_prefix=s3_prefix, script_name=SCRIPT_FILE)
    s3_prefix = 's3://{}/{}/sourcedir.tar.gz'.format(BUCKET_NAME, JOB_NAME)
    m_tar.return_value = UploadedCode(s3_prefix=s3_prefix, script_name=SCRIPT_FILE)
    tf.fit(inputs=inputs)

    call_names = [c[0] for c in sagemaker_session.method_calls]
    assert call_names == ['train', 'logs_for_job']

    expected_train_args = _create_train_job(tf_version)
    expected_train_args['input_config'][0]['DataSource']['S3DataSource']['S3Uri'] = inputs

    actual_train_args = sagemaker_session.method_calls[0][2]
    assert actual_train_args == expected_train_args

    model = tf.create_model()

    environment = {
        'Environment': {
            'SAGEMAKER_SUBMIT_DIRECTORY': 's3://{}/{}/sourcedir.tar.gz'.format(BUCKET_NAME, JOB_NAME),
            'SAGEMAKER_PROGRAM': 'dummy_script.py', 'SAGEMAKER_REQUIREMENTS': 'dummy_requirements.txt',
            'SAGEMAKER_ENABLE_CLOUDWATCH_METRICS': 'false', 'SAGEMAKER_REGION': 'us-west-2',
            'SAGEMAKER_CONTAINER_LOG_LEVEL': '20'
        },
        'Image': create_image_uri('us-west-2', "tensorflow", INSTANCE_TYPE, tf_version, "py2"),
        'ModelDataUrl': 's3://m/m.tar.gz'
    }
    assert environment == model.prepare_container_def(INSTANCE_TYPE)
    assert 'cpu' in model.prepare_container_def(INSTANCE_TYPE)['Image']

    predictor = tf.deploy(1, INSTANCE_TYPE)
    assert isinstance(predictor, TensorFlowPredictor)