This article collects typical usage examples of the TensorFlow.fit method from the Python package sagemaker.tensorflow. If you are wondering what TensorFlow.fit does, how to call it, or what working code that uses it looks like, the curated examples below should help. You may also want to read more about the containing class, sagemaker.tensorflow.TensorFlow.
Fifteen code examples of TensorFlow.fit are shown below, sorted by popularity by default.
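Before the individual examples, here is a minimal sketch of the pattern they all share: construct a TensorFlow estimator and call fit() on training data in S3. It follows the SageMaker Python SDK 1.x parameter names used throughout this page; the entry-point script, bucket, and framework version are illustrative placeholders, not values taken from any particular example.

from sagemaker.tensorflow import TensorFlow

# Hypothetical script and bucket names; substitute your own.
estimator = TensorFlow(entry_point='my_training_script.py',
                       role='SageMakerRole',        # IAM role SageMaker assumes for the job
                       framework_version='1.6',     # example version for the legacy training_steps mode
                       training_steps=1000,
                       evaluation_steps=100,
                       train_instance_count=1,
                       train_instance_type='ml.c4.xlarge')

# fit() packages and uploads the source, starts the training job, and by default
# blocks while streaming logs; pass wait=False to return immediately (see Example 2).
estimator.fit('s3://my-bucket/train')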
Example 1: test_cifar
Trains a ResNet CIFAR-10 model on two ml.p2.xlarge instances, deploys the result, and checks that a prediction returns ten class probabilities.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_cifar(sagemaker_session, tf_full_version):
    with timeout(minutes=45):
        script_path = os.path.join(DATA_DIR, 'cifar_10', 'source')
        dataset_path = os.path.join(DATA_DIR, 'cifar_10', 'data')

        estimator = TensorFlow(entry_point='resnet_cifar_10.py', source_dir=script_path, role='SageMakerRole',
                               framework_version=tf_full_version, training_steps=500, evaluation_steps=5,
                               train_instance_count=2, train_instance_type='ml.p2.xlarge',
                               sagemaker_session=sagemaker_session, train_max_run=45 * 60,
                               base_job_name='test-cifar')

        inputs = estimator.sagemaker_session.upload_data(path=dataset_path, key_prefix='data/cifar10')
        estimator.fit(inputs, logs=False)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.p2.xlarge')
        predictor.serializer = PickleSerializer()
        predictor.content_type = PICKLE_CONTENT_TYPE

        data = np.random.randn(32, 32, 3)
        predict_response = predictor.predict(data)
        assert len(predict_response['outputs']['probabilities']['floatVal']) == 10
Example 2: test_tf_async
Starts a training job asynchronously with fit(..., wait=False), then re-attaches to it with TensorFlow.attach() before deploying.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_tf_async(sagemaker_session):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               base_job_name='test-tf')

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf_iris')
        estimator.fit(inputs, wait=False)
        training_job_name = estimator.latest_training_job.name
        time.sleep(20)

    endpoint_name = training_job_name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        estimator = TensorFlow.attach(training_job_name=training_job_name, sagemaker_session=sagemaker_session)
        json_predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge',
                                          endpoint_name=endpoint_name)

        result = json_predictor.predict([6.4, 3.2, 4.5, 1.5])
        print('predict result: {}'.format(result))
Example 3: test_failed_tf_training
Runs a deliberately failing script inside a VPC and verifies that fit() raises the expected error and that the training job records the subnet and security group.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_failed_tf_training(sagemaker_session, tf_full_version):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'failure_script.py')
        ec2_client = sagemaker_session.boto_session.client('ec2')
        subnet, security_group_id = get_or_create_subnet_and_security_group(ec2_client, VPC_NAME)

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               subnets=[subnet],
                               security_group_ids=[security_group_id])

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf-failure')

        with pytest.raises(ValueError) as e:
            estimator.fit(inputs)
        assert 'This failure is expected' in str(e.value)

        job_desc = estimator.sagemaker_session.sagemaker_client.describe_training_job(
            TrainingJobName=estimator.latest_training_job.name)
        assert [subnet] == job_desc['VpcConfig']['Subnets']
        assert [security_group_id] == job_desc['VpcConfig']['SecurityGroupIds']
Example 4: test_tf
Trains the iris DNN classifier, deploys it, and checks that dict and list inputs to predict() give the same result.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_tf(sagemaker_session, tf_full_version):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               base_job_name='test-tf')

        inputs = sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf_iris')
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        json_predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge',
                                          endpoint_name=endpoint_name)

        features = [6.4, 3.2, 4.5, 1.5]
        dict_result = json_predictor.predict({'inputs': features})
        print('predict result: {}'.format(dict_result))
        list_result = json_predictor.predict(features)
        print('predict result: {}'.format(list_result))

        assert dict_result == list_result
Example 5: test_run_tensorboard_locally_without_awscli_binary
Verifies that fit(..., run_tensorboard_locally=True) raises EnvironmentError when the AWS CLI is not installed.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_run_tensorboard_locally_without_awscli_binary(time, strftime, popen, call, access, sagemaker_session):
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    train_instance_count=INSTANCE_COUNT, train_instance_type=INSTANCE_TYPE)

    with pytest.raises(EnvironmentError) as error:
        tf.fit(inputs='s3://mybucket/train', run_tensorboard_locally=True)
    assert str(error.value) == 'The AWS CLI is not installed in the system. Please install the AWS CLI using the ' \
                               'following command: \n pip install awscli'
Example 6: test_run_tensorboard_locally
Verifies that fit(..., run_tensorboard_locally=True) launches a local TensorBoard process on port 6006.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_run_tensorboard_locally(sleep, time, strftime, popen, call, access, rmtree, mkdtemp, sync, sagemaker_session):
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    train_instance_count=INSTANCE_COUNT, train_instance_type=INSTANCE_TYPE)

    popen().poll.return_value = None

    tf.fit(inputs='s3://mybucket/train', run_tensorboard_locally=True)

    popen.assert_called_with(['tensorboard', '--logdir', '/my/temp/folder', '--host', 'localhost', '--port', '6006'],
                             stderr=-1,
                             stdout=-1)
Example 7: test_run_tensorboard_locally_port_in_use
Verifies that the local TensorBoard launch falls back to port 6007 when 6006 is already in use.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_run_tensorboard_locally_port_in_use(time, strftime, popen, call, access, socket, rmtree, mkdtemp, sync,
                                             sagemaker_session):
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    train_instance_count=INSTANCE_COUNT, train_instance_type=INSTANCE_TYPE)

    popen().poll.side_effect = [-1, None]

    tf.fit(inputs='s3://mybucket/train', run_tensorboard_locally=True)

    popen.assert_any_call(['tensorboard', '--logdir', '/my/temp/folder', '--host', 'localhost', '--port', '6006'],
                          stderr=-1, stdout=-1)

    popen.assert_any_call(['tensorboard', '--logdir', '/my/temp/folder', '--host', 'localhost', '--port', '6007'],
                          stderr=-1, stdout=-1)
Example 8: test_create_model_with_custom_image
Checks that a model created after fit() keeps the custom container image passed via image_name.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_create_model_with_custom_image(sagemaker_session):
    container_log_level = '"logging.INFO"'
    source_dir = 's3://mybucket/source'
    custom_image = 'tensorflow:1.0'
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    training_steps=1000, evaluation_steps=10, train_instance_count=INSTANCE_COUNT,
                    train_instance_type=INSTANCE_TYPE, image_name=custom_image,
                    container_log_level=container_log_level, base_job_name='job',
                    source_dir=source_dir)

    job_name = 'doing something'
    tf.fit(inputs='s3://mybucket/train', job_name=job_name)
    model = tf.create_model()

    assert model.image == custom_image
Example 9: test_create_model_with_optional_params
Checks that create_model() honors an overridden role and model_server_workers after fit().

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_create_model_with_optional_params(sagemaker_session):
    container_log_level = '"logging.INFO"'
    source_dir = 's3://mybucket/source'
    enable_cloudwatch_metrics = 'true'
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    training_steps=1000, evaluation_steps=10, train_instance_count=INSTANCE_COUNT,
                    train_instance_type=INSTANCE_TYPE, container_log_level=container_log_level,
                    base_job_name='job', source_dir=source_dir,
                    enable_cloudwatch_metrics=enable_cloudwatch_metrics)

    job_name = 'doing something'
    tf.fit(inputs='s3://mybucket/train', job_name=job_name)

    new_role = 'role'
    model_server_workers = 2
    model = tf.create_model(role=new_role, model_server_workers=model_server_workers)

    assert model.role == new_role
    assert model.model_server_workers == model_server_workers
Example 10: test_failed_tf_training
Same failure scenario as Example 3, but without a VPC configuration: a deliberately failing script makes fit() raise ValueError.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_failed_tf_training(sagemaker_session, tf_full_version):
    with timeout(minutes=15):
        script_path = os.path.join(DATA_DIR, 'iris', 'failure_script.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session)

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf-failure')

        with pytest.raises(ValueError) as e:
            estimator.fit(inputs)
        assert 'This failure is expected' in str(e.value)
Example 11: test_tf_local_mode
Trains and serves the iris classifier entirely in Local Mode, using a file lock to serialize access to the local serving port.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_tf_local_mode(tf_full_version, sagemaker_local_session):
    local_mode_lock_fd = open(LOCK_PATH, 'w')
    local_mode_lock = local_mode_lock_fd.fileno()
    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='local',
                               base_job_name='test-tf',
                               sagemaker_session=sagemaker_local_session)

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH,
                                                         key_prefix='integ-test-data/tf_iris')
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    try:
        # Since Local Mode uses the same port for serving, we need a lock in order
        # to allow concurrent test execution. The serving test is really fast so it still
        # makes sense to allow this behavior.
        fcntl.lockf(local_mode_lock, fcntl.LOCK_EX)
        json_predictor = estimator.deploy(initial_instance_count=1,
                                          instance_type='local',
                                          endpoint_name=endpoint_name)

        features = [6.4, 3.2, 4.5, 1.5]
        dict_result = json_predictor.predict({'inputs': features})
        print('predict result: {}'.format(dict_result))
        list_result = json_predictor.predict(features)
        print('predict result: {}'.format(list_result))

        assert dict_result == list_result
    finally:
        estimator.delete_endpoint()
        time.sleep(5)
        fcntl.lockf(local_mode_lock, fcntl.LOCK_UN)
Example 12: test_create_model
Checks that create_model() propagates the estimator's session, framework version, entry point, and other settings after fit().

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_create_model(sagemaker_session, tf_version):
    container_log_level = '"logging.INFO"'
    source_dir = 's3://mybucket/source'
    tf = TensorFlow(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
                    training_steps=1000, evaluation_steps=10, train_instance_count=INSTANCE_COUNT,
                    train_instance_type=INSTANCE_TYPE, framework_version=tf_version,
                    container_log_level=container_log_level, base_job_name='job',
                    source_dir=source_dir)

    job_name = 'doing something'
    tf.fit(inputs='s3://mybucket/train', job_name=job_name)
    model = tf.create_model()

    assert model.sagemaker_session == sagemaker_session
    assert model.framework_version == tf_version
    assert model.py_version == tf.py_version
    assert model.entry_point == SCRIPT_PATH
    assert model.role == ROLE
    assert model.name == job_name
    assert model.container_log_level == container_log_level
    assert model.source_dir == source_dir
Example 13: test_tf
Unit test that mocks code upload, calls fit(), and verifies both the training request sent to the session and the container definition produced by create_model().

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_tf(m_tar, e_tar, time, strftime, sagemaker_session, tf_version):
    tf = TensorFlow(entry_point=SCRIPT_FILE, role=ROLE, sagemaker_session=sagemaker_session, training_steps=1000,
                    evaluation_steps=10, train_instance_count=INSTANCE_COUNT, train_instance_type=INSTANCE_TYPE,
                    framework_version=tf_version, requirements_file=REQUIREMENTS_FILE, source_dir=DATA_DIR)

    inputs = 's3://mybucket/train'
    s3_prefix = 's3://{}/{}/source/sourcedir.tar.gz'.format(BUCKET_NAME, JOB_NAME)
    e_tar.return_value = UploadedCode(s3_prefix=s3_prefix, script_name=SCRIPT_FILE)
    s3_prefix = 's3://{}/{}/sourcedir.tar.gz'.format(BUCKET_NAME, JOB_NAME)
    m_tar.return_value = UploadedCode(s3_prefix=s3_prefix, script_name=SCRIPT_FILE)
    tf.fit(inputs=inputs)

    call_names = [c[0] for c in sagemaker_session.method_calls]
    assert call_names == ['train', 'logs_for_job']

    expected_train_args = _create_train_job(tf_version)
    expected_train_args['input_config'][0]['DataSource']['S3DataSource']['S3Uri'] = inputs

    actual_train_args = sagemaker_session.method_calls[0][2]
    assert actual_train_args == expected_train_args

    model = tf.create_model()

    environment = {
        'Environment': {
            'SAGEMAKER_SUBMIT_DIRECTORY': 's3://{}/{}/sourcedir.tar.gz'.format(BUCKET_NAME, JOB_NAME),
            'SAGEMAKER_PROGRAM': 'dummy_script.py', 'SAGEMAKER_REQUIREMENTS': 'dummy_requirements.txt',
            'SAGEMAKER_ENABLE_CLOUDWATCH_METRICS': 'false', 'SAGEMAKER_REGION': 'us-west-2',
            'SAGEMAKER_CONTAINER_LOG_LEVEL': '20'
        },
        'Image': create_image_uri('us-west-2', "tensorflow", INSTANCE_TYPE, tf_version, "py2"),
        'ModelDataUrl': 's3://m/m.tar.gz'
    }
    assert environment == model.prepare_container_def(INSTANCE_TYPE)

    assert 'cpu' in model.prepare_container_def(INSTANCE_TYPE)['Image']
    predictor = tf.deploy(1, INSTANCE_TYPE)
    assert isinstance(predictor, TensorFlowPredictor)
Example 14: test_deploy
Verifies that deploy() after fit() creates a model with the expected CPU image and container environment.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_deploy(sagemaker_session, tf_version):
    estimator = TensorFlow(entry_point=SCRIPT, source_dir=SOURCE_DIR, role=ROLE,
                           framework_version=tf_version,
                           train_instance_count=2, train_instance_type=INSTANCE_TYPE_CPU,
                           sagemaker_session=sagemaker_session,
                           base_job_name='test-cifar')

    estimator.fit('s3://mybucket/train')
    print('job succeeded: {}'.format(estimator.latest_training_job.name))

    estimator.deploy(initial_instance_count=1, instance_type=INSTANCE_TYPE_CPU)
    image = IMAGE_URI_FORMAT_STRING.format(REGION, CPU_IMAGE_NAME, tf_version, 'cpu', 'py2')
    sagemaker_session.create_model.assert_called_with(
        estimator._current_job_name,
        ROLE,
        {'Environment':
            {'SAGEMAKER_ENABLE_CLOUDWATCH_METRICS': 'false',
             'SAGEMAKER_CONTAINER_LOG_LEVEL': '20',
             'SAGEMAKER_SUBMIT_DIRECTORY': SOURCE_DIR,
             'SAGEMAKER_REQUIREMENTS': '',
             'SAGEMAKER_REGION': REGION,
             'SAGEMAKER_PROGRAM': SCRIPT},
         'Image': image,
         'ModelDataUrl': 's3://m/m.tar.gz'})
Example 15: test_keras
Trains a Keras CNN on CIFAR-10, deploys it on a GPU instance, and checks the shape of the prediction response.

# Required imports: from sagemaker.tensorflow import TensorFlow [as alias]
# Or: from sagemaker.tensorflow.TensorFlow import fit [as alias]
def test_keras(sagemaker_session, tf_full_version):
    script_path = os.path.join(DATA_DIR, 'cifar_10', 'source')
    dataset_path = os.path.join(DATA_DIR, 'cifar_10', 'data')

    with timeout(minutes=45):
        estimator = TensorFlow(entry_point='keras_cnn_cifar_10.py',
                               source_dir=script_path,
                               role='SageMakerRole', sagemaker_session=sagemaker_session,
                               hyperparameters={'learning_rate': 1e-4, 'decay': 1e-6},
                               training_steps=500, evaluation_steps=5,
                               train_instance_count=1, train_instance_type='ml.c4.xlarge',
                               train_max_run=45 * 60)

        inputs = estimator.sagemaker_session.upload_data(path=dataset_path, key_prefix='data/cifar10')

        estimator.fit(inputs)

    endpoint_name = estimator.latest_training_job.name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.p2.xlarge')

        data = np.random.randn(32, 32, 3)
        predict_response = predictor.predict(data)

        assert len(predict_response['outputs']['probabilities']['floatVal']) == 10