This page collects typical usage examples of stopit.ThreadingTimeout in Python. If you are unsure what stopit.ThreadingTimeout does or how to use it, the curated code samples below should help; you can also explore further usage examples from the stopit module itself.
The page shows 9 code examples of stopit.ThreadingTimeout, sorted by popularity by default.
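Before the examples, here is a minimal, hedged sketch of the basic pattern they all build on: stopit.ThreadingTimeout is used as a context manager, and its state attribute records whether the guarded block finished or was interrupted (the 2-second limit and the busy loop are only illustrative):

import stopit

# Minimal sketch: interrupt a pure-Python loop after 2 seconds.
# With the default swallow_exc=True, the internal TimeoutException is
# swallowed and execution simply continues after the with-block.
counter = 0
with stopit.ThreadingTimeout(2) as to_ctx_mgr:
    while True:          # stands in for any long-running pure-Python work
        counter += 1

if to_ctx_mgr.state == to_ctx_mgr.TIMED_OUT:
    print("block was interrupted after 2 seconds")
elif to_ctx_mgr.state == to_ctx_mgr.EXECUTED:
    print("block finished within the time limit")

Passing swallow_exc=False, as most of the examples below do, instead makes the timeout raise stopit.TimeoutException out of the with-block.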
Example 1: timeout
# Required import: import stopit [as alias]
# Or: from stopit import ThreadingTimeout [as alias]
# Note: the snippet yields inside a with-block, which only works as a context
# manager if wrapped with contextlib.contextmanager; the import and decorator
# are restored here on that assumption.
from contextlib import contextmanager


@contextmanager
def timeout(seconds=0, minutes=0, hours=0):
    """
    Add a thread-based timeout to any block of code.
    If multiple time units are specified, they are added together to determine the time limit.
    Usage:
        with timeout(seconds=5):
            my_slow_function(...)
    Args:
        - seconds: The time limit, in seconds.
        - minutes: The time limit, in minutes.
        - hours: The time limit, in hours.
    """
    limit = seconds + 60 * minutes + 3600 * hours
    # swallow_exc=False makes stopit re-raise TimeoutException to the caller.
    with stopit.ThreadingTimeout(limit, swallow_exc=False) as t:
        yield [t]
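A hedged usage sketch of the helper above; because it passes swallow_exc=False, hitting the limit raises stopit.TimeoutException in the caller (my_slow_function is a placeholder, not part of the original example):

import stopit

try:
    with timeout(minutes=2):
        my_slow_function()        # placeholder for the guarded work
except stopit.TimeoutException:
    print("work did not finish within 2 minutes")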
Example 2: timeout_and_delete_endpoint_by_name
# Required import: import stopit [as alias]
# Or: from stopit import ThreadingTimeout [as alias]
@contextmanager  # assumed, as in Example 1: the function yields inside a with-block
def timeout_and_delete_endpoint_by_name(
    endpoint_name,
    sagemaker_session,
    seconds=0,
    minutes=45,
    hours=0,
    sleep_between_cleanup_attempts=10,
):
    limit = seconds + 60 * minutes + 3600 * hours
    with stopit.ThreadingTimeout(limit, swallow_exc=False) as t:
        no_errors = False
        try:
            yield [t]
            no_errors = True
        finally:
            attempts = 3
            while attempts > 0:
                attempts -= 1
                try:
                    _delete_schedules_associated_with_endpoint(
                        sagemaker_session=sagemaker_session, endpoint_name=endpoint_name
                    )
                    sagemaker_session.delete_endpoint(endpoint_name)
                    LOGGER.info("deleted endpoint {}".format(endpoint_name))
                    _show_logs(endpoint_name, "Endpoints", sagemaker_session)
                    if no_errors:
                        _cleanup_logs(endpoint_name, "Endpoints", sagemaker_session)
                    break
                except ClientError as ce:
                    if ce.response["Error"]["Code"] == "ValidationException":
                        # avoid overwriting the inner exception
                        pass
                    # retry the deletion after a short pause
                    sleep(sleep_between_cleanup_attempts)
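For context, a hedged sketch of how a test might use this cleanup helper; sagemaker_session, predictor, endpoint_name, and payload are assumed fixtures introduced only for illustration, not part of the original example:

def test_endpoint_invocation(sagemaker_session, predictor, endpoint_name, payload):
    with timeout_and_delete_endpoint_by_name(
        endpoint_name=endpoint_name,
        sagemaker_session=sagemaker_session,
        minutes=30,
    ):
        # Exercise the endpoint; whether this block succeeds, fails, or times
        # out, the finally-clause in the helper deletes the endpoint afterwards.
        assert predictor.predict(payload) is not None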
Example 3: timeout_and_delete_model_with_transformer
# Required import: import stopit [as alias]
# Or: from stopit import ThreadingTimeout [as alias]
@contextmanager  # assumed, as in Example 1
def timeout_and_delete_model_with_transformer(
    transformer, sagemaker_session, seconds=0, minutes=0, hours=0, sleep_between_cleanup_attempts=10
):
    limit = seconds + 60 * minutes + 3600 * hours
    with stopit.ThreadingTimeout(limit, swallow_exc=False) as t:
        no_errors = False
        try:
            yield [t]
            no_errors = True
        finally:
            attempts = 3
            while attempts > 0:
                attempts -= 1
                try:
                    transformer.delete_model()
                    LOGGER.info("deleted SageMaker model {}".format(transformer.model_name))
                    _show_logs(transformer.model_name, "Models", sagemaker_session)
                    if no_errors:
                        _cleanup_logs(transformer.model_name, "Models", sagemaker_session)
                    break
                except ClientError as ce:
                    if ce.response["Error"]["Code"] == "ValidationException":
                        # avoid overwriting the inner exception
                        pass
                    sleep(sleep_between_cleanup_attempts)
Example 4: test_tf_local_mode
# Required import: import stopit [as alias]
# Or: from stopit import ThreadingTimeout [as alias]
def test_tf_local_mode(sagemaker_local_session):
    # Give training at most five minutes before stopit aborts the block.
    with stopit.ThreadingTimeout(5 * 60, swallow_exc=False):
        script_path = os.path.join(DATA_DIR, "iris", "iris-dnn-classifier.py")

        estimator = TensorFlow(
            entry_point=script_path,
            role="SageMakerRole",
            framework_version="1.12",
            training_steps=1,
            evaluation_steps=1,
            hyperparameters={"input_tensor_name": "inputs"},
            train_instance_count=1,
            train_instance_type="local",
            base_job_name="test-tf",
            sagemaker_session=sagemaker_local_session,
        )

        inputs = estimator.sagemaker_session.upload_data(
            path=DATA_PATH, key_prefix="integ-test-data/tf_iris"
        )
        estimator.fit(inputs)
        print("job succeeded: {}".format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    with lock.lock(LOCK_PATH):
        try:
            json_predictor = estimator.deploy(
                initial_instance_count=1, instance_type="local", endpoint_name=endpoint_name
            )

            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({"inputs": features})
            print("predict result: {}".format(dict_result))
            list_result = json_predictor.predict(features)
            print("predict result: {}".format(list_result))

            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
Example 5: test_tf_distributed_local_mode
# Required import: import stopit [as alias]
# Or: from stopit import ThreadingTimeout [as alias]
def test_tf_distributed_local_mode(sagemaker_local_session):
    # Bound the distributed training run to five minutes.
    with stopit.ThreadingTimeout(5 * 60, swallow_exc=False):
        script_path = os.path.join(DATA_DIR, "iris", "iris-dnn-classifier.py")

        estimator = TensorFlow(
            entry_point=script_path,
            role="SageMakerRole",
            framework_version="1.12",
            training_steps=1,
            evaluation_steps=1,
            hyperparameters={"input_tensor_name": "inputs"},
            train_instance_count=3,
            train_instance_type="local",
            base_job_name="test-tf",
            sagemaker_session=sagemaker_local_session,
        )

        inputs = "file://" + DATA_PATH
        estimator.fit(inputs)
        print("job succeeded: {}".format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    with lock.lock(LOCK_PATH):
        try:
            json_predictor = estimator.deploy(
                initial_instance_count=1, instance_type="local", endpoint_name=endpoint_name
            )

            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({"inputs": features})
            print("predict result: {}".format(dict_result))
            list_result = json_predictor.predict(features)
            print("predict result: {}".format(list_result))

            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
Example 6: test_tf_local_data_local_script
# Required import: import stopit [as alias]
# Or: from stopit import ThreadingTimeout [as alias]
def test_tf_local_data_local_script():
    # Bound the training run (local data, local script) to five minutes.
    with stopit.ThreadingTimeout(5 * 60, swallow_exc=False):
        script_path = os.path.join(DATA_DIR, "iris", "iris-dnn-classifier.py")

        estimator = TensorFlow(
            entry_point=script_path,
            role="SageMakerRole",
            framework_version="1.12",
            training_steps=1,
            evaluation_steps=1,
            hyperparameters={"input_tensor_name": "inputs"},
            train_instance_count=1,
            train_instance_type="local",
            base_job_name="test-tf",
            sagemaker_session=LocalNoS3Session(),
        )

        inputs = "file://" + DATA_PATH
        estimator.fit(inputs)
        print("job succeeded: {}".format(estimator.latest_training_job.name))

    endpoint_name = estimator.latest_training_job.name
    with lock.lock(LOCK_PATH):
        try:
            json_predictor = estimator.deploy(
                initial_instance_count=1, instance_type="local", endpoint_name=endpoint_name
            )

            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({"inputs": features})
            print("predict result: {}".format(dict_result))
            list_result = json_predictor.predict(features)
            print("predict result: {}".format(list_result))

            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
Example 7: timed
# Required import: import stopit [as alias]
# Or: from stopit import ThreadingTimeout [as alias]
# Also relies on: from functools import wraps
def timed(timeout):
    def outer_wrapper(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            with stopit.ThreadingTimeout(timeout) as to_ctx_mgr:
                f(*args, **kwargs)
            # EXECUTED means the wrapped call finished before the deadline.
            if to_ctx_mgr.state != to_ctx_mgr.EXECUTED:
                raise Exception("Test function timed out.")
        return wrapper
    return outer_wrapper
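A hedged usage sketch of the timed decorator above; the 30-second budget and the test body are illustrative:

@timed(30)
def test_model_converges():
    # Placeholder for the work under test; if it is still running after
    # 30 seconds, the wrapper aborts it and raises "Test function timed out."
    train_tiny_model()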
Example 8: __call__
# Required import: import stopit [as alias]
# Or: from stopit import ThreadingTimeout [as alias]
def __call__(self, worker: Stage, **kwargs):
    for x in worker.iter_dependencies():
        # Only guard the work with a timeout when the stage defines one.
        with (
            stopit.ThreadingTimeout(worker.timeout)
            if worker.timeout
            else utils.NoOpContext()
        ):
            yield from self.apply(worker, x, **kwargs)
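The example above assumes a utils.NoOpContext helper from its surrounding project; a minimal sketch of such a do-nothing context manager follows (the name and placement are assumptions, and on Python 3.7+ contextlib.nullcontext() serves the same purpose):

class NoOpContext:
    """Do-nothing stand-in used when no timeout is configured."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return False   # never suppress exceptions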
Example 9: test_local_transform_mxnet
# Required import: import stopit [as alias]
# Or: from stopit import ThreadingTimeout [as alias]
def test_local_transform_mxnet(
    sagemaker_local_session, tmpdir, mxnet_full_version, cpu_instance_type
):
    data_path = os.path.join(DATA_DIR, "mxnet_mnist")
    script_path = os.path.join(data_path, "mnist.py")

    mx = MXNet(
        entry_point=script_path,
        role="SageMakerRole",
        train_instance_count=1,
        train_instance_type="local",
        framework_version=mxnet_full_version,
        sagemaker_session=sagemaker_local_session,
    )

    train_input = mx.sagemaker_session.upload_data(
        path=os.path.join(data_path, "train"), key_prefix="integ-test-data/mxnet_mnist/train"
    )
    test_input = mx.sagemaker_session.upload_data(
        path=os.path.join(data_path, "test"), key_prefix="integ-test-data/mxnet_mnist/test"
    )

    # Training is bounded to five minutes; the transform steps run afterwards.
    with stopit.ThreadingTimeout(5 * 60, swallow_exc=False):
        mx.fit({"train": train_input, "test": test_input})

    transform_input_path = os.path.join(data_path, "transform")
    transform_input_key_prefix = "integ-test-data/mxnet_mnist/transform"
    transform_input = mx.sagemaker_session.upload_data(
        path=transform_input_path, key_prefix=transform_input_key_prefix
    )

    output_path = "file://%s" % (str(tmpdir))
    transformer = mx.transformer(
        1,
        "local",
        assemble_with="Line",
        max_payload=1,
        strategy="SingleRecord",
        output_path=output_path,
    )

    with lock.lock(LOCK_PATH):
        transformer.transform(transform_input, content_type="text/csv", split_type="Line")
        transformer.wait()

    assert os.path.exists(os.path.join(str(tmpdir), "data.csv.out"))