

Python utils.sagemaker_timestamp Method Code Examples

This article collects typical usage examples of the Python method sagemaker.utils.sagemaker_timestamp. If you are wondering what utils.sagemaker_timestamp does or how to use it in practice, the hand-picked examples here should help. You can also explore further usage examples from the containing module, sagemaker.utils.


The sections below present 15 code examples of the utils.sagemaker_timestamp method, sorted by popularity by default.
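
Before the examples, here is a minimal usage sketch (not drawn from the projects below; the name "my-demo-endpoint" is a hypothetical placeholder, and the timestamp shown is only illustrative): sagemaker_timestamp() returns a timestamp string with sub-second precision, which the examples below use to build unique endpoint names, job names, and S3 key prefixes.

from sagemaker.utils import sagemaker_timestamp

# Append a timestamp suffix, e.g. "2020-06-16-14-31-00-123", to obtain a
# unique, human-readable resource name ("my-demo-endpoint" is a placeholder).
endpoint_name = "my-demo-endpoint-{}".format(sagemaker_timestamp())
print(endpoint_name)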

Example 1: test_deploy_model

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_deploy_model(pytorch_training_job, sagemaker_session, cpu_instance_type):
    endpoint_name = "test-pytorch-deploy-model-{}".format(sagemaker_timestamp())

    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        desc = sagemaker_session.sagemaker_client.describe_training_job(
            TrainingJobName=pytorch_training_job
        )
        model_data = desc["ModelArtifacts"]["S3ModelArtifacts"]
        model = PyTorchModel(
            model_data,
            "SageMakerRole",
            entry_point=MNIST_SCRIPT,
            sagemaker_session=sagemaker_session,
        )
        predictor = model.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)

        batch_size = 100
        data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
        output = predictor.predict(data)

        assert output.shape == (batch_size, 10) 
Developer: aws, Project: sagemaker-python-sdk, Lines: 23, Source: test_pytorch_train.py

Example 2: test_deploy_packed_model_with_entry_point_name

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_deploy_packed_model_with_entry_point_name(sagemaker_session, cpu_instance_type):
    endpoint_name = "test-pytorch-deploy-model-{}".format(sagemaker_timestamp())

    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        model_data = sagemaker_session.upload_data(path=PACKED_MODEL)
        model = PyTorchModel(
            model_data,
            "SageMakerRole",
            entry_point="mnist.py",
            framework_version="1.4.0",
            sagemaker_session=sagemaker_session,
        )
        predictor = model.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)

        batch_size = 100
        data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
        output = predictor.predict(data)

        assert output.shape == (batch_size, 10) 
Developer: aws, Project: sagemaker-python-sdk, Lines: 21, Source: test_pytorch_train.py

Example 3: test_deploy_model_with_accelerator

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_deploy_model_with_accelerator(sagemaker_session, cpu_instance_type):
    endpoint_name = "test-pytorch-deploy-eia-{}".format(sagemaker_timestamp())
    model_data = sagemaker_session.upload_data(path=EIA_MODEL)
    pytorch = PyTorchModel(
        model_data,
        "SageMakerRole",
        framework_version="1.3.1",
        entry_point=EIA_SCRIPT,
        sagemaker_session=sagemaker_session,
    )
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = pytorch.deploy(
            initial_instance_count=1,
            instance_type=cpu_instance_type,
            accelerator_type="ml.eia1.medium",
            endpoint_name=endpoint_name,
        )

        batch_size = 100
        data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
        output = predictor.predict(data)

        assert output.shape == (batch_size, 10) 
Developer: aws, Project: sagemaker-python-sdk, Lines: 25, Source: test_pytorch_train.py

Example 4: test_coach_mxnet

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_coach_mxnet(sagemaker_session, rl_coach_mxnet_full_version, cpu_instance_type):
    estimator = _test_coach(
        sagemaker_session, RLFramework.MXNET, rl_coach_mxnet_full_version, cpu_instance_type
    )
    job_name = unique_name_from_base("test-coach-mxnet")

    with timeout(minutes=15):
        estimator.fit(wait=False, job_name=job_name)

        estimator = RLEstimator.attach(
            estimator.latest_training_job.name, sagemaker_session=sagemaker_session
        )

    endpoint_name = "test-mxnet-coach-deploy-{}".format(sagemaker_timestamp())

    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = estimator.deploy(
            1, cpu_instance_type, entry_point="mxnet_deploy.py", endpoint_name=endpoint_name
        )

        observation = numpy.asarray([0, 0, 0, 0])
        action = predictor.predict(observation)

    assert 0 < action[0][0] < 1
    assert 0 < action[0][1] < 1 
Developer: aws, Project: sagemaker-python-sdk, Lines: 27, Source: test_rl.py

Example 5: test_coach_tf

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_coach_tf(sagemaker_session, rl_coach_tf_full_version, cpu_instance_type):
    estimator = _test_coach(
        sagemaker_session, RLFramework.TENSORFLOW, rl_coach_tf_full_version, cpu_instance_type
    )
    job_name = unique_name_from_base("test-coach-tf")

    with timeout(minutes=15):
        estimator.fit(job_name=job_name)

    endpoint_name = "test-tf-coach-deploy-{}".format(sagemaker_timestamp())

    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = estimator.deploy(1, cpu_instance_type)
        observation = numpy.asarray([0, 0, 0, 0])
        action = predictor.predict(observation)

    assert action == {"predictions": [[0.5, 0.5]]} 
Developer: aws, Project: sagemaker-python-sdk, Lines: 19, Source: test_rl.py

Example 6: test_deploy_model

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_deploy_model(sklearn_training_job, sagemaker_session, cpu_instance_type):
    endpoint_name = "test-sklearn-deploy-model-{}".format(sagemaker_timestamp())
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        desc = sagemaker_session.sagemaker_client.describe_training_job(
            TrainingJobName=sklearn_training_job
        )
        model_data = desc["ModelArtifacts"]["S3ModelArtifacts"]
        script_path = os.path.join(DATA_DIR, "sklearn_mnist", "mnist.py")
        model = SKLearnModel(
            model_data,
            "SageMakerRole",
            entry_point=script_path,
            sagemaker_session=sagemaker_session,
        )
        predictor = model.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)
        _predict_and_assert(predictor) 
Developer: aws, Project: sagemaker-python-sdk, Lines: 18, Source: test_sklearn_train.py

Example 7: test_training

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_training(sagemaker_session, image_uri, instance_type, instance_count):
    hyperparameters = {'sagemaker_parameter_server_enabled': True} if instance_count > 1 else {}
    hyperparameters['epochs'] = 1

    mx = MXNet(entry_point=SCRIPT_PATH,
               role='SageMakerRole',
               train_instance_count=instance_count,
               train_instance_type=instance_type,
               sagemaker_session=sagemaker_session,
               image_name=image_uri,
               hyperparameters=hyperparameters)

    with timeout(minutes=15):
        prefix = 'mxnet_mnist/{}'.format(utils.sagemaker_timestamp())
        train_input = mx.sagemaker_session.upload_data(path=os.path.join(DATA_PATH, 'train'),
                                                       key_prefix=prefix + '/train')
        test_input = mx.sagemaker_session.upload_data(path=os.path.join(DATA_PATH, 'test'),
                                                      key_prefix=prefix + '/test')

        job_name = utils.unique_name_from_base('test-mxnet-image')
        mx.fit({'train': train_input, 'test': test_input}, job_name=job_name) 
Developer: aws, Project: sagemaker-mxnet-training-toolkit, Lines: 23, Source: test_training.py

Example 8: create_docker_services

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def create_docker_services(command, tmpdir, hosts, image, additional_volumes, additional_env_vars,
                           customer_script, source_dir, entrypoint, use_gpu=False):

    environment = []
    session = boto3.Session()

    optml_dirs = set()
    if command == 'train':
        optml_dirs = {'output', 'input'}

    elif command == 'serve':
        environment.extend(DEFAULT_HOSTING_ENV)

        if customer_script:
            timestamp = utils.sagemaker_timestamp()
            s3_script_path = fw_utils.tar_and_upload_dir(session=session,
                                                         bucket=default_bucket(session),
                                                         s3_key_prefix='test-{}'.format(timestamp),
                                                         script=customer_script,
                                                         directory=source_dir)[0]

            environment.extend([
                'SAGEMAKER_PROGRAM={}'.format(os.path.basename(customer_script)),
                'SAGEMAKER_SUBMIT_DIRECTORY={}'.format(s3_script_path)
            ])
    else:
        raise ValueError('Unexpected command: {}'.format(command))

    environment.extend(credentials_to_env(session))

    environment.extend(additional_env_vars)

    return {h: create_docker_host(tmpdir, h, image, environment, optml_dirs, command,
                                  additional_volumes, entrypoint, use_gpu) for h in hosts} 
Developer: aws, Project: sagemaker-xgboost-container, Lines: 36, Source: local_mode.py

Example 9: test_inference_pipeline_batch_transform

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_inference_pipeline_batch_transform(sagemaker_session, cpu_instance_type):
    sparkml_model_data = sagemaker_session.upload_data(
        path=os.path.join(SPARKML_DATA_PATH, "mleap_model.tar.gz"),
        key_prefix="integ-test-data/sparkml/model",
    )
    xgb_model_data = sagemaker_session.upload_data(
        path=os.path.join(XGBOOST_DATA_PATH, "xgb_model.tar.gz"),
        key_prefix="integ-test-data/xgboost/model",
    )
    batch_job_name = "test-inference-pipeline-batch-{}".format(sagemaker_timestamp())
    sparkml_model = SparkMLModel(
        model_data=sparkml_model_data,
        env={"SAGEMAKER_SPARKML_SCHEMA": SCHEMA},
        sagemaker_session=sagemaker_session,
    )
    xgb_image = get_image_uri(sagemaker_session.boto_region_name, "xgboost")
    xgb_model = Model(
        model_data=xgb_model_data, image=xgb_image, sagemaker_session=sagemaker_session
    )
    model = PipelineModel(
        models=[sparkml_model, xgb_model],
        role="SageMakerRole",
        sagemaker_session=sagemaker_session,
        name=batch_job_name,
    )
    transformer = model.transformer(1, cpu_instance_type)
    transform_input_key_prefix = "integ-test-data/sparkml_xgboost/transform"
    transform_input = transformer.sagemaker_session.upload_data(
        path=VALID_DATA_PATH, key_prefix=transform_input_key_prefix
    )

    with timeout_and_delete_model_with_transformer(
        transformer, sagemaker_session, minutes=TRANSFORM_DEFAULT_TIMEOUT_MINUTES
    ):
        transformer.transform(
            transform_input, content_type=CONTENT_TYPE_CSV, job_name=batch_job_name
        )
        transformer.wait() 
Developer: aws, Project: sagemaker-python-sdk, Lines: 40, Source: test_inference_pipeline.py

Example 10: test_byo_airflow_config_uploads_data_source_to_s3_when_inputs_provided

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_byo_airflow_config_uploads_data_source_to_s3_when_inputs_provided(
    sagemaker_session, cpu_instance_type
):
    with timeout(seconds=AIRFLOW_CONFIG_TIMEOUT_IN_SECONDS):
        training_data_path = os.path.join(DATA_DIR, "dummy_tensor")

        data_source_location = "test-airflow-config-{}".format(sagemaker_timestamp())
        inputs = sagemaker_session.upload_data(
            path=training_data_path, key_prefix=os.path.join(data_source_location, "train")
        )

        estimator = Estimator(
            image_name=get_image_uri(
                sagemaker_session.boto_session.region_name, "factorization-machines"
            ),
            role=ROLE,
            train_instance_count=SINGLE_INSTANCE_COUNT,
            train_instance_type=cpu_instance_type,
            sagemaker_session=sagemaker_session,
        )

        training_config = _build_airflow_workflow(
            estimator=estimator, instance_type=cpu_instance_type, inputs=inputs
        )

        _assert_that_s3_url_contains_data(
            sagemaker_session,
            training_config["InputDataConfig"][0]["DataSource"]["S3DataSource"]["S3Uri"],
        ) 
Developer: aws, Project: sagemaker-python-sdk, Lines: 31, Source: test_airflow_config.py

Example 11: test_sparkml_model_deploy

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_sparkml_model_deploy(sagemaker_session, cpu_instance_type):
    # Uploads an MLeap-serialized model to S3 and uses it to deploy a SparkML model for inference
    data_path = os.path.join(DATA_DIR, "sparkml_model")
    endpoint_name = "test-sparkml-deploy-{}".format(sagemaker_timestamp())
    model_data = sagemaker_session.upload_data(
        path=os.path.join(data_path, "mleap_model.tar.gz"),
        key_prefix="integ-test-data/sparkml/model",
    )
    schema = json.dumps(
        {
            "input": [
                {"name": "Pclass", "type": "float"},
                {"name": "Embarked", "type": "string"},
                {"name": "Age", "type": "float"},
                {"name": "Fare", "type": "float"},
                {"name": "SibSp", "type": "float"},
                {"name": "Sex", "type": "string"},
            ],
            "output": {"name": "features", "struct": "vector", "type": "double"},
        }
    )
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        model = SparkMLModel(
            model_data=model_data,
            role="SageMakerRole",
            sagemaker_session=sagemaker_session,
            env={"SAGEMAKER_SPARKML_SCHEMA": schema},
        )
        predictor = model.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)

        valid_data = "1.0,C,38.0,71.5,1.0,female"
        assert predictor.predict(valid_data) == "1.0,0.0,38.0,1.0,71.5,0.0,1.0"

        invalid_data = "1.0,28.0,C,38.0,71.5,1.0"
        assert predictor.predict(invalid_data) is None 
Developer: aws, Project: sagemaker-python-sdk, Lines: 37, Source: test_sparkml_serving.py

Example 12: container_image

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def container_image(sagemaker_session):
    """ Create a Multi-Model container image for use with integration testcases
    since 1P containers supporting multiple models are not available yet"""
    region = sagemaker_session.boto_region_name
    ecr_client = sagemaker_session.boto_session.client("ecr", region_name=region)
    sts_client = sagemaker_session.boto_session.client(
        "sts", region_name=region, endpoint_url=utils.sts_regional_endpoint(region)
    )
    account_id = sts_client.get_caller_identity()["Account"]
    algorithm_name = "sagemaker-multimodel-integ-test-{}".format(sagemaker_timestamp())
    ecr_image_uri_prefix = get_ecr_image_uri_prefix(account=account_id, region=region)
    ecr_image = "{prefix}/{algorithm_name}:latest".format(
        prefix=ecr_image_uri_prefix, algorithm_name=algorithm_name
    )

    # Build and tag docker image locally
    docker_client = docker.from_env()
    image, build_log = docker_client.images.build(
        path=os.path.join(DATA_DIR, "multimodel", "container"), tag=algorithm_name, rm=True
    )
    image.tag(ecr_image, tag="latest")

    # Create AWS ECR and push the local docker image to it
    _create_repository(ecr_client, algorithm_name)
    username, password = _ecr_login(ecr_client)
    # Retry docker image push
    for _ in retries(3, "Upload docker image to ECR repo", seconds_to_sleep=10):
        try:
            docker_client.images.push(
                ecr_image, auth_config={"username": username, "password": password}
            )
            break
        except requests.exceptions.ConnectionError:
            # This can happen when we try to create multiple repositories in parallel, so we retry
            pass

    yield ecr_image

    # Delete repository after the multi model integration tests complete
    _delete_repository(ecr_client, algorithm_name) 
Developer: aws, Project: sagemaker-python-sdk, Lines: 42, Source: test_multidatamodel.py

Example 13: test_marketplace_estimator

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_marketplace_estimator(sagemaker_session, cpu_instance_type):
    with timeout(minutes=15):
        data_path = os.path.join(DATA_DIR, "marketplace", "training")
        region = sagemaker_session.boto_region_name
        account = REGION_ACCOUNT_MAP[region]
        algorithm_arn = ALGORITHM_ARN.format(
            partition=_aws_partition(region), region=region, account=account
        )

        algo = AlgorithmEstimator(
            algorithm_arn=algorithm_arn,
            role="SageMakerRole",
            train_instance_count=1,
            train_instance_type=cpu_instance_type,
            sagemaker_session=sagemaker_session,
        )

        train_input = algo.sagemaker_session.upload_data(
            path=data_path, key_prefix="integ-test-data/marketplace/train"
        )

        algo.fit({"training": train_input})

    endpoint_name = "test-marketplace-estimator{}".format(sagemaker_timestamp())
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session, minutes=20):
        predictor = algo.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)
        shape = pandas.read_csv(os.path.join(data_path, "iris.csv"), header=None)

        a = [50 * i for i in range(3)]
        b = [40 + i for i in range(10)]
        indices = [i + j for i, j in itertools.product(a, b)]

        test_data = shape.iloc[indices[:-1]]
        test_x = test_data.iloc[:, 1:]

        print(predictor.predict(test_x.values).decode("utf-8")) 
Developer: aws, Project: sagemaker-python-sdk, Lines: 38, Source: test_marketplace.py

Example 14: test_attach_deploy

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_attach_deploy(mxnet_training_job, sagemaker_session, cpu_instance_type):
    endpoint_name = "test-mxnet-attach-deploy-{}".format(sagemaker_timestamp())

    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        estimator = MXNet.attach(mxnet_training_job, sagemaker_session=sagemaker_session)
        predictor = estimator.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)
        data = numpy.zeros(shape=(1, 1, 28, 28))
        result = predictor.predict(data)
        assert result is not None 
Developer: aws, Project: sagemaker-python-sdk, Lines: 11, Source: test_mxnet_train.py

Example 15: test_deploy_model

# Required import: from sagemaker import utils [as alias]
# Or: from sagemaker.utils import sagemaker_timestamp [as alias]
def test_deploy_model(mxnet_training_job, sagemaker_session, mxnet_full_version, cpu_instance_type):
    endpoint_name = "test-mxnet-deploy-model-{}".format(sagemaker_timestamp())

    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        desc = sagemaker_session.sagemaker_client.describe_training_job(
            TrainingJobName=mxnet_training_job
        )
        model_data = desc["ModelArtifacts"]["S3ModelArtifacts"]
        script_path = os.path.join(DATA_DIR, "mxnet_mnist", "mnist.py")
        model = MXNetModel(
            model_data,
            "SageMakerRole",
            entry_point=script_path,
            py_version=PYTHON_VERSION,
            sagemaker_session=sagemaker_session,
            framework_version=mxnet_full_version,
        )
        predictor = model.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)

        data = numpy.zeros(shape=(1, 1, 28, 28))
        result = predictor.predict(data)
        assert result is not None

    predictor.delete_model()
    with pytest.raises(Exception) as exception:
        sagemaker_session.sagemaker_client.describe_model(ModelName=model.name)
    assert "Could not find model" in str(exception.value)
Developer: aws, Project: sagemaker-python-sdk, Lines: 29, Source: test_mxnet_train.py


Note: The sagemaker.utils.sagemaker_timestamp examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.