This article collects typical usage examples of the timeout function from the Python module tests.integ.timeout. If you have been wondering what timeout does, how to call it, or what real uses of it look like, the curated examples below should help.
The 15 code examples below are drawn from SageMaker Python SDK integration tests and are sorted by popularity by default.
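In these tests, timeout is a context manager: the with timeout(minutes=N) block raises once the wall-clock limit passes, so a hung training job cannot stall the whole suite. For readers who want to see the shape of such a helper, here is a minimal sketch; it is an illustration only, not the actual tests.integ.timeout source, and the signal-based approach assumed below works only in the main thread on Unix.

import signal
from contextlib import contextmanager

@contextmanager
def timeout(minutes=0, seconds=0):
    # Illustrative sketch, not the real tests.integ.timeout implementation.
    # Total wall-clock budget for the enclosed block, in seconds.
    limit = int(minutes * 60 + seconds)

    def _handler(signum, frame):
        raise TimeoutError('block exceeded {} seconds'.format(limit))

    old_handler = signal.signal(signal.SIGALRM, _handler)  # install alarm handler
    signal.alarm(limit)                                    # start the countdown
    try:
        yield
    finally:
        signal.alarm(0)                                    # cancel any pending alarm
        signal.signal(signal.SIGALRM, old_handler)         # restore previous handler

Used as with timeout(minutes=15): ..., the body either finishes in time or the test fails fast with TimeoutError.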
Example 1: test_lda
def test_lda(sagemaker_session):
    with timeout(minutes=15):
        data_path = os.path.join(DATA_DIR, 'lda')
        data_filename = 'nips-train_1.pbr'

        with open(os.path.join(data_path, data_filename), 'rb') as f:
            all_records = read_records(f)

        # all records must have the same number of features
        feature_num = int(all_records[0].features['values'].float32_tensor.shape[0])

        lda = LDA(role='SageMakerRole', train_instance_type='ml.c4.xlarge', num_topics=10,
                  sagemaker_session=sagemaker_session, base_job_name='test-lda')

        record_set = prepare_record_set_from_local_files(data_path, lda.data_location,
                                                         len(all_records), feature_num, sagemaker_session)
        lda.fit(record_set, 100)

    endpoint_name = name_from_base('lda')
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        model = LDAModel(lda.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
        predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)

        predict_input = np.random.rand(1, feature_num)
        result = predictor.predict(predict_input)

        assert len(result) == 1
        for record in result:
            assert record.label["topic_mixture"] is not None
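Example 1 already shows the second helper that recurs throughout this page: timeout_and_delete_endpoint_by_name, which bounds the deploy-and-predict phase and deletes the endpoint no matter how the block exits. A plausible sketch, reusing the timeout helper above, follows; the default time budget and the bare delete_endpoint call are assumptions, and the real helper may add retries and logging omitted here.

from contextlib import contextmanager

@contextmanager
def timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session, minutes=45):
    # Hedged sketch: time-box the block, then clean up unconditionally.
    with timeout(minutes=minutes):
        try:
            yield
        finally:
            try:
                # Always attempt cleanup so failed tests do not leak endpoints.
                sagemaker_session.delete_endpoint(endpoint_name)
            except Exception:
                pass  # the endpoint may never have been created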
Example 2: test_cifar
def test_cifar(sagemaker_session, tf_full_version):
    with timeout(minutes=45):
        script_path = os.path.join(DATA_DIR, 'cifar_10', 'source')
        dataset_path = os.path.join(DATA_DIR, 'cifar_10', 'data')

        estimator = TensorFlow(entry_point='resnet_cifar_10.py', source_dir=script_path, role='SageMakerRole',
                               framework_version=tf_full_version, training_steps=500, evaluation_steps=5,
                               train_instance_count=2, train_instance_type='ml.p2.xlarge',
                               sagemaker_session=sagemaker_session, train_max_run=45 * 60,
                               base_job_name='test-cifar')

        inputs = estimator.sagemaker_session.upload_data(path=dataset_path, key_prefix='data/cifar10')
        estimator.fit(inputs, logs=False)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.p2.xlarge')
        predictor.serializer = PickleSerializer()
        predictor.content_type = PICKLE_CONTENT_TYPE

        data = np.random.randn(32, 32, 3)
        predict_response = predictor.predict(data)
        assert len(predict_response['outputs']['probabilities']['floatVal']) == 10
Example 3: test_async_fit
def test_async_fit(sagemaker_session):
    endpoint_name = 'test-mxnet-attach-deploy-{}'.format(sagemaker_timestamp())

    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'mxnet_mnist', 'mnist.py')
        data_path = os.path.join(DATA_DIR, 'mxnet_mnist')

        mx = MXNet(entry_point=script_path, role='SageMakerRole',
                   train_instance_count=1, train_instance_type='ml.c4.xlarge',
                   sagemaker_session=sagemaker_session)

        train_input = mx.sagemaker_session.upload_data(path=os.path.join(data_path, 'train'),
                                                       key_prefix='integ-test-data/mxnet_mnist/train')
        test_input = mx.sagemaker_session.upload_data(path=os.path.join(data_path, 'test'),
                                                      key_prefix='integ-test-data/mxnet_mnist/test')

        mx.fit({'train': train_input, 'test': test_input}, wait=False)
        training_job_name = mx.latest_training_job.name

        print("Waiting to re-attach to the training job: %s" % training_job_name)
        time.sleep(20)

    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        print("Re-attaching now to: %s" % training_job_name)
        estimator = MXNet.attach(training_job_name=training_job_name, sagemaker_session=sagemaker_session)
        predictor = estimator.deploy(1, 'ml.m4.xlarge', endpoint_name=endpoint_name)
        data = numpy.zeros(shape=(1, 1, 28, 28))
        predictor.predict(data)
Example 4: test_factorization_machines
def test_factorization_machines(sagemaker_session):
    with timeout(minutes=15):
        data_path = os.path.join(DATA_DIR, 'one_p_mnist', 'mnist.pkl.gz')
        pickle_args = {} if sys.version_info.major == 2 else {'encoding': 'latin1'}

        # Load the data into memory as numpy arrays
        with gzip.open(data_path, 'rb') as f:
            train_set, _, _ = pickle.load(f, **pickle_args)

        fm = FactorizationMachines(role='SageMakerRole', train_instance_count=1,
                                   train_instance_type='ml.c4.xlarge',
                                   num_factors=10, predictor_type='regressor',
                                   epochs=2, clip_gradient=1e2, eps=0.001, rescale_grad=1.0 / 100,
                                   sagemaker_session=sagemaker_session, base_job_name='test-fm')

        # training labels must be 'float32'
        fm.fit(fm.record_set(train_set[0][:200], train_set[1][:200].astype('float32')))

    endpoint_name = name_from_base('fm')
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        model = FactorizationMachinesModel(fm.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
        predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)

        result = predictor.predict(train_set[0][:10])
        assert len(result) == 10
        for record in result:
            assert record.label["score"] is not None
Example 5: test_tf_async
def test_tf_async(sagemaker_session):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               base_job_name='test-tf')

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf_iris')
        estimator.fit(inputs, wait=False)
        training_job_name = estimator.latest_training_job.name
        time.sleep(20)

    endpoint_name = training_job_name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        estimator = TensorFlow.attach(training_job_name=training_job_name, sagemaker_session=sagemaker_session)
        json_predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge',
                                          endpoint_name=endpoint_name)

        result = json_predictor.predict([6.4, 3.2, 4.5, 1.5])
        print('predict result: {}'.format(result))
Example 6: test_tuning_mxnet
def test_tuning_mxnet(sagemaker_session):
    with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'mxnet_mnist', 'tuning.py')
        data_path = os.path.join(DATA_DIR, 'mxnet_mnist')

        estimator = MXNet(entry_point=script_path,
                          role='SageMakerRole',
                          train_instance_count=1,
                          train_instance_type='ml.m4.xlarge',
                          sagemaker_session=sagemaker_session,
                          base_job_name='tune-mxnet')

        hyperparameter_ranges = {'learning_rate': ContinuousParameter(0.01, 0.2)}
        objective_metric_name = 'Validation-accuracy'
        metric_definitions = [{'Name': 'Validation-accuracy', 'Regex': 'Validation-accuracy=([0-9\\.]+)'}]

        tuner = HyperparameterTuner(estimator, objective_metric_name, hyperparameter_ranges, metric_definitions,
                                    max_jobs=4, max_parallel_jobs=2)

        train_input = estimator.sagemaker_session.upload_data(path=os.path.join(data_path, 'train'),
                                                              key_prefix='integ-test-data/mxnet_mnist/train')
        test_input = estimator.sagemaker_session.upload_data(path=os.path.join(data_path, 'test'),
                                                             key_prefix='integ-test-data/mxnet_mnist/test')
        tuner.fit({'train': train_input, 'test': test_input})

        print('Started hyperparameter tuning job with name:' + tuner.latest_tuning_job.name)

        time.sleep(15)
        tuner.wait()

    best_training_job = tuner.best_training_job()
    with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):
        predictor = tuner.deploy(1, 'ml.c4.xlarge')
        data = np.zeros(shape=(1, 1, 28, 28))
        predictor.predict(data)
Example 7: test_knn_regressor
def test_knn_regressor(sagemaker_session):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        data_path = os.path.join(DATA_DIR, 'one_p_mnist', 'mnist.pkl.gz')
        pickle_args = {} if sys.version_info.major == 2 else {'encoding': 'latin1'}

        # Load the data into memory as numpy arrays
        with gzip.open(data_path, 'rb') as f:
            train_set, _, _ = pickle.load(f, **pickle_args)

        knn = KNN(role='SageMakerRole', train_instance_count=1,
                  train_instance_type='ml.c4.xlarge',
                  k=10, predictor_type='regressor', sample_size=500,
                  sagemaker_session=sagemaker_session, base_job_name='test-knn-rr')

        # training labels must be 'float32'
        knn.fit(knn.record_set(train_set[0][:200], train_set[1][:200].astype('float32')))

    endpoint_name = name_from_base('knn')
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        model = KNNModel(knn.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
        predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)

        result = predictor.predict(train_set[0][:10])
        assert len(result) == 10
        for record in result:
            assert record.label["score"] is not None
Example 8: test_tf
def test_tf(sagemaker_session, tf_full_version):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               base_job_name='test-tf')

        inputs = sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf_iris')
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        json_predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge',
                                          endpoint_name=endpoint_name)

        features = [6.4, 3.2, 4.5, 1.5]
        dict_result = json_predictor.predict({'inputs': features})
        print('predict result: {}'.format(dict_result))
        list_result = json_predictor.predict(features)
        print('predict result: {}'.format(list_result))

        assert dict_result == list_result
Example 9: test_async_fit_deploy
def test_async_fit_deploy(sagemaker_session, pytorch_full_version):
    training_job_name = ""
    # TODO: add tests against local mode when it's ready to be used
    instance_type = 'ml.p2.xlarge'

    with timeout(minutes=10):
        pytorch = _get_pytorch_estimator(sagemaker_session, pytorch_full_version, instance_type)

        pytorch.fit({'training': _upload_training_data(pytorch)}, wait=False)
        training_job_name = pytorch.latest_training_job.name

        print("Waiting to re-attach to the training job: %s" % training_job_name)
        time.sleep(20)

    if not _is_local_mode(instance_type):
        endpoint_name = 'test-pytorch-async-fit-attach-deploy-{}'.format(sagemaker_timestamp())

        with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
            print("Re-attaching now to: %s" % training_job_name)
            estimator = PyTorch.attach(training_job_name=training_job_name, sagemaker_session=sagemaker_session)
            predictor = estimator.deploy(1, instance_type, endpoint_name=endpoint_name)
            batch_size = 100
            data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
            output = predictor.predict(data)
            assert output.shape == (batch_size, 10)
Example 10: test_pca
def test_pca(sagemaker_session):
    with timeout(minutes=15):
        data_path = os.path.join(DATA_DIR, 'one_p_mnist', 'mnist.pkl.gz')
        pickle_args = {} if sys.version_info.major == 2 else {'encoding': 'latin1'}

        # Load the data into memory as numpy arrays
        with gzip.open(data_path, 'rb') as f:
            train_set, _, _ = pickle.load(f, **pickle_args)

        pca = sagemaker.amazon.pca.PCA(role='SageMakerRole', train_instance_count=1,
                                       train_instance_type='ml.m4.xlarge',
                                       num_components=48, sagemaker_session=sagemaker_session,
                                       base_job_name='test-pca')

        pca.algorithm_mode = 'randomized'
        pca.subtract_mean = True
        pca.extra_components = 5
        pca.fit(pca.record_set(train_set[0][:100]))

    endpoint_name = name_from_base('pca')
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        pca_model = sagemaker.amazon.pca.PCAModel(model_data=pca.model_data, role='SageMakerRole',
                                                  sagemaker_session=sagemaker_session)
        predictor = pca_model.deploy(initial_instance_count=1, instance_type="ml.c4.xlarge",
                                     endpoint_name=endpoint_name)

        result = predictor.predict(train_set[0][:5])
        assert len(result) == 5
        for record in result:
            assert record.label["projection"] is not None
Example 11: test_linear_learner_multiclass
def test_linear_learner_multiclass(sagemaker_session):
    with timeout(minutes=15):
        data_path = os.path.join(DATA_DIR, 'one_p_mnist', 'mnist.pkl.gz')
        pickle_args = {} if sys.version_info.major == 2 else {'encoding': 'latin1'}

        # Load the data into memory as numpy arrays
        with gzip.open(data_path, 'rb') as f:
            train_set, _, _ = pickle.load(f, **pickle_args)

        train_set = train_set[0], train_set[1].astype(np.dtype('float32'))

        ll = LinearLearner('SageMakerRole', 1, 'ml.c4.2xlarge', base_job_name='test-linear-learner',
                           predictor_type='multiclass_classifier', num_classes=10,
                           sagemaker_session=sagemaker_session)
        ll.epochs = 1
        ll.fit(ll.record_set(train_set[0][:200], train_set[1][:200]))

    endpoint_name = name_from_base('linear-learner')
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = ll.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)

        result = predictor.predict(train_set[0][0:100])
        assert len(result) == 100
        for record in result:
            assert record.label["predicted_label"] is not None
            assert record.label["score"] is not None
Example 12: fixture_training_job
def fixture_training_job(sagemaker_session, pytorch_full_version):
    instance_type = 'ml.c4.xlarge'

    with timeout(minutes=15):
        pytorch = _get_pytorch_estimator(sagemaker_session, pytorch_full_version, instance_type)

        pytorch.fit({'training': _upload_training_data(pytorch)})
        return pytorch.latest_training_job.name
Example 13: test_ntm
def test_ntm(sagemaker_session):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        data_path = os.path.join(DATA_DIR, 'ntm')
        data_filename = 'nips-train_1.pbr'

        with open(os.path.join(data_path, data_filename), 'rb') as f:
            all_records = read_records(f)

        # all records must have the same number of features
        feature_num = int(all_records[0].features['values'].float32_tensor.shape[0])

        ntm = NTM(role='SageMakerRole', train_instance_count=1, train_instance_type='ml.c4.xlarge', num_topics=10,
                  sagemaker_session=sagemaker_session, base_job_name='test-ntm')

        record_set = prepare_record_set_from_local_files(data_path, ntm.data_location,
                                                         len(all_records), feature_num, sagemaker_session)
        ntm.fit(record_set, None)

    endpoint_name = name_from_base('ntm')
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        model = NTMModel(ntm.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
        predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)

        predict_input = np.random.rand(1, feature_num)
        result = predictor.predict(predict_input)

        assert len(result) == 1
        for record in result:
            assert record.label["topic_weights"] is not None
Example 14: test_failed_tf_training
def test_failed_tf_training(sagemaker_session, tf_full_version):
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, 'iris', 'failure_script.py')
        ec2_client = sagemaker_session.boto_session.client('ec2')
        subnet, security_group_id = get_or_create_subnet_and_security_group(ec2_client, VPC_NAME)

        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='ml.c4.xlarge',
                               sagemaker_session=sagemaker_session,
                               subnets=[subnet],
                               security_group_ids=[security_group_id])

        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH, key_prefix='integ-test-data/tf-failure')

        with pytest.raises(ValueError) as e:
            estimator.fit(inputs)
        assert 'This failure is expected' in str(e.value)

        job_desc = estimator.sagemaker_session.sagemaker_client.describe_training_job(
            TrainingJobName=estimator.latest_training_job.name)
        assert [subnet] == job_desc['VpcConfig']['Subnets']
        assert [security_group_id] == job_desc['VpcConfig']['SecurityGroupIds']
Example 15: test_failed_training_job
def test_failed_training_job(sagemaker_session, pytorch_full_version):
    script_path = os.path.join(MNIST_DIR, 'failure_script.py')

    with timeout(minutes=15):
        pytorch = _get_pytorch_estimator(sagemaker_session, pytorch_full_version, entry_point=script_path)

        with pytest.raises(ValueError) as e:
            pytorch.fit(_upload_training_data(pytorch))
        assert 'This failure is expected' in str(e.value)