This article collects typical usage examples of the unique_resource_id function from the Python module test_utils.system. If you are wondering what unique_resource_id does, how to call it, or where it is used in practice, the curated examples below should help.
The 15 code examples shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples. Note that every example assumes unique_resource_id has already been imported (from test_utils.system import unique_resource_id), and that names such as Config, LABELS, LOCATION_ID, or retry_429 are module-level fixtures defined elsewhere in the originating test suites.
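Before working through the examples, it helps to know roughly what unique_resource_id returns: a short, time-based suffix (for instance "-1553200000123") that tests append to resource names so that repeated or parallel test runs never collide on bucket, instance, or document names. The sketch below shows what such a helper typically looks like; the millisecond-timestamp format and the CIRCLE_BUILD_NUM lookup are assumptions modeled on common CI setups, not a verbatim copy of test_utils.system.

import os
import time

def unique_resource_id(delimiter="_"):
    """Return a unique, time-based suffix for test resource names.

    Sketch only: embeds the current time (and a CI build number when
    one is available) so concurrent test runs create distinct names.
    """
    build_id = os.getenv("CIRCLE_BUILD_NUM", "")  # assumption: optional CI build id
    if build_id:
        return "%s%s%s%d" % (delimiter, build_id, delimiter, time.time())
    return "%s%d" % (delimiter, int(1000 * time.time()))

# Usage: "inst-my" + unique_resource_id("-")  ->  e.g. "inst-my-1553200000123"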
Example 1: test_bigtable_delete_instance
def test_bigtable_delete_instance():
    # [START bigtable_delete_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance_id_to_delete = "inst-my-" + unique_resource_id("-")
    # [END bigtable_delete_instance]

    cluster_id = "clus-my-" + unique_resource_id("-")

    instance = client.instance(
        instance_id_to_delete, instance_type=PRODUCTION, labels=LABELS
    )
    cluster = instance.cluster(
        cluster_id,
        location_id=ALT_LOCATION_ID,
        serve_nodes=SERVER_NODES,
        default_storage_type=STORAGE_TYPE,
    )
    operation = instance.create(clusters=[cluster])
    # We want to make sure the operation completes.
    operation.result(timeout=100)

    # [START bigtable_delete_instance]
    instance_to_delete = client.instance(instance_id_to_delete)
    instance_to_delete.delete()
    # [END bigtable_delete_instance]

    assert not instance_to_delete.exists()
Example 2: test_bigtable_create_instance
def test_bigtable_create_instance():
    # [START bigtable_create_prod_instance]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    my_instance_id = "inst-my-" + unique_resource_id("-")
    my_cluster_id = "clus-my-" + unique_resource_id("-")
    location_id = "us-central1-f"
    serve_nodes = 3
    storage_type = enums.StorageType.SSD
    production = enums.Instance.Type.PRODUCTION
    labels = {"prod-label": "prod-label"}

    client = Client(admin=True)
    instance = client.instance(my_instance_id, instance_type=production, labels=labels)
    cluster = instance.cluster(
        my_cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = instance.create(clusters=[cluster])
    # We want to make sure the operation completes.
    operation.result(timeout=100)
    # [END bigtable_create_prod_instance]

    assert instance.exists()
    instance.delete()
Example 3: test_create_bucket
def test_create_bucket(self):
    new_bucket_name = 'a-new-bucket' + unique_resource_id('-')
    self.assertRaises(exceptions.NotFound,
                      Config.CLIENT.get_bucket, new_bucket_name)
    created = retry_429(Config.CLIENT.create_bucket)(new_bucket_name)
    self.case_buckets_to_delete.append(new_bucket_name)
    self.assertEqual(created.name, new_bucket_name)
Example 4: test_document_set_merge
def test_document_set_merge(client, cleanup):
    document_id = "for-set" + unique_resource_id("-")
    document = client.document("i-did-it", document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    # 0. Make sure the document doesn't exist yet
    snapshot = document.get()
    assert not snapshot.exists

    # 1. Use ``create()`` to create the document.
    data1 = {"name": "Sam", "address": {"city": "SF", "state": "CA"}}
    write_result1 = document.create(data1)
    snapshot1 = document.get()
    assert snapshot1.to_dict() == data1
    # Make sure the update is what created the document.
    assert snapshot1.create_time == snapshot1.update_time
    assert snapshot1.update_time == write_result1.update_time

    # 2. Call ``set()`` to merge
    data2 = {"address": {"city": "LA"}}
    write_result2 = document.set(data2, merge=True)
    snapshot2 = document.get()
    assert snapshot2.to_dict() == {
        "name": "Sam",
        "address": {"city": "LA", "state": "CA"},
    }
    # Make sure the create time hasn't changed.
    assert snapshot2.create_time == snapshot1.create_time
    assert snapshot2.update_time == write_result2.update_time
Example 5: test_document_set
def test_document_set(client, cleanup):
    document_id = "for-set" + unique_resource_id("-")
    document = client.document("i-did-it", document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    # 0. Make sure the document doesn't exist yet
    snapshot = document.get()
    assert snapshot.to_dict() is None

    # 1. Use ``create()`` to create the document.
    data1 = {"foo": 88}
    write_result1 = document.create(data1)
    snapshot1 = document.get()
    assert snapshot1.to_dict() == data1
    # Make sure the update is what created the document.
    assert snapshot1.create_time == snapshot1.update_time
    assert snapshot1.update_time == write_result1.update_time

    # 2. Call ``set()`` again to overwrite.
    data2 = {"bar": None}
    write_result2 = document.set(data2)
    snapshot2 = document.get()
    assert snapshot2.to_dict() == data2
    # Make sure the create time hasn't changed.
    assert snapshot2.create_time == snapshot1.create_time
    assert snapshot2.update_time == write_result2.update_time
Example 6: test_update_type
def test_update_type(self):
    from google.cloud.bigtable.enums import Instance

    _DEVELOPMENT = Instance.Type.DEVELOPMENT
    _PRODUCTION = Instance.Type.PRODUCTION
    ALT_INSTANCE_ID = "ndif" + unique_resource_id("-")
    instance = Config.CLIENT.instance(
        ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS
    )
    operation = instance.create(location_id=LOCATION_ID, serve_nodes=None)

    # Make sure this instance gets deleted after the test case.
    self.instances_to_delete.append(instance)

    # We want to make sure the operation completes.
    operation.result(timeout=10)

    # Unset the display_name
    instance.display_name = None
    instance.type_ = _PRODUCTION
    operation = instance.update()

    # We want to make sure the operation completes.
    operation.result(timeout=10)

    # Create a new Instance object and reload it.
    instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID)
    self.assertIsNone(instance_alt.type_)
    instance_alt.reload()
    self.assertEqual(instance_alt.type_, _PRODUCTION)
Example 7: test_create_instance_defaults
def test_create_instance_defaults(self):
    from google.cloud.bigtable import enums

    ALT_INSTANCE_ID = "ndef" + unique_resource_id("-")
    instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS)
    ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster"
    cluster = instance.cluster(
        ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES
    )
    operation = instance.create(clusters=[cluster])

    # We want to make sure the operation completes.
    operation.result(timeout=10)

    # Make sure this instance gets deleted after the test case.
    self.instances_to_delete.append(instance)

    # Create a new Instance object and make sure it is the same.
    instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID)
    instance_alt.reload()
    self.assertEqual(instance, instance_alt)
    self.assertEqual(instance.display_name, instance_alt.display_name)
    # Make sure that by default a PRODUCTION type instance is created.
    self.assertIsNone(instance.type_)
    self.assertEqual(instance_alt.type_, enums.Instance.Type.PRODUCTION)
Example 8: test_watch_collection
def test_watch_collection(client, cleanup):
    db = client
    doc_ref = db.collection(u"users").document(u"alovelace" + unique_resource_id())
    collection_ref = db.collection(u"users")

    # Initial setting.
    doc_ref.set({u"first": u"Jane", u"last": u"Doe", u"born": 1900})

    # Set up the listener.
    def on_snapshot(docs, changes, read_time):
        on_snapshot.called_count += 1
        for doc in [doc for doc in docs if doc.id == doc_ref.id]:
            on_snapshot.born = doc.get("born")

    on_snapshot.called_count = 0
    on_snapshot.born = 0
    collection_ref.on_snapshot(on_snapshot)

    # Delay here so the initial on_snapshot fires and isn't combined with the set below.
    sleep(1)

    doc_ref.set({u"first": u"Ada", u"last": u"Lovelace", u"born": 1815})

    for _ in range(10):
        if on_snapshot.born == 1815:
            break
        sleep(1)

    if on_snapshot.born != 1815:
        raise AssertionError(
            "Expected the last document update to update born: " + str(on_snapshot.born)
        )
Example 9: test_collection_group_queries
def test_collection_group_queries(client, cleanup):
    collection_group = "b" + unique_resource_id("-")
    doc_paths = [
        "abc/123/" + collection_group + "/cg-doc1",
        "abc/123/" + collection_group + "/cg-doc2",
        collection_group + "/cg-doc3",
        collection_group + "/cg-doc4",
        "def/456/" + collection_group + "/cg-doc5",
        collection_group + "/virtual-doc/nested-coll/not-cg-doc",
        "x" + collection_group + "/not-cg-doc",
        collection_group + "x/not-cg-doc",
        "abc/123/" + collection_group + "x/not-cg-doc",
        "abc/123/x" + collection_group + "/not-cg-doc",
        "abc/" + collection_group,
    ]
    batch = client.batch()
    for doc_path in doc_paths:
        doc_ref = client.document(doc_path)
        batch.set(doc_ref, {"x": 1})
    batch.commit()

    query = client.collection_group(collection_group)
    snapshots = list(query.stream())
    found = [snapshot.id for snapshot in snapshots]
    expected = ["cg-doc1", "cg-doc2", "cg-doc3", "cg-doc4", "cg-doc5"]
    assert found == expected
Example 10: test_document_integer_field
def test_document_integer_field(client, cleanup):
    document_id = 'for-set' + unique_resource_id('-')
    document = client.document('i-did-it', document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    data1 = {
        '1a': {
            '2b': '3c',
            'ab': '5e'},
        '6f': {
            '7g': '8h',
            'cd': '0j'}
    }
    document.create(data1)

    data2 = {'1a.ab': '4d', '6f.7g': '9h'}
    option2 = client.write_option(exists=True)
    document.update(data2, option=option2)

    snapshot = document.get()
    expected = {
        '1a': {
            '2b': '3c',
            'ab': '4d'},
        '6f': {
            '7g': '9h',
            'cd': '0j'}
    }
    assert snapshot.to_dict() == expected
Example 11: test_create_sink_pubsub_topic
def test_create_sink_pubsub_topic(self):
    from google.cloud import pubsub_v1

    SINK_NAME = 'test-create-sink-topic%s' % (_RESOURCE_ID,)
    TOPIC_NAME = 'logging-systest{}'.format(unique_resource_id('-'))

    # Create the destination topic, and set up the IAM policy to allow
    # Stackdriver Logging to write into it.
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(Config.CLIENT.project, TOPIC_NAME)
    self.to_delete.append(_DeleteWrapper(publisher, topic_path))
    publisher.create_topic(topic_path)

    policy = publisher.get_iam_policy(topic_path)
    policy.bindings.add(
        role='roles/owner',
        members=['group:[email protected]']
    )
    publisher.set_iam_policy(topic_path, policy)

    TOPIC_URI = 'pubsub.googleapis.com/%s' % (topic_path,)

    sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, TOPIC_URI)
    self.assertFalse(sink.exists())
    sink.create()
    self.to_delete.append(sink)
    self.assertTrue(sink.exists())
Example 12: test_create_sink_pubsub_topic
def test_create_sink_pubsub_topic(self):
    from google.cloud import pubsub_v1

    SINK_NAME = "test-create-sink-topic%s" % (_RESOURCE_ID,)
    TOPIC_NAME = "logging-systest{}".format(unique_resource_id("-"))

    # Create the destination topic, and set up the IAM policy to allow
    # Stackdriver Logging to write into it.
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(Config.CLIENT.project, TOPIC_NAME)
    self.to_delete.append(_DeleteWrapper(publisher, topic_path))
    publisher.create_topic(topic_path)

    policy = publisher.get_iam_policy(topic_path)
    policy.bindings.add(role="roles/owner", members=["group:[email protected]"])
    publisher.set_iam_policy(topic_path, policy)

    TOPIC_URI = "pubsub.googleapis.com/%s" % (topic_path,)

    retry = RetryErrors((Conflict, ServiceUnavailable), max_tries=10)
    sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, TOPIC_URI)
    self.assertFalse(sink.exists())
    retry(sink.create)()
    self.to_delete.append(sink)
    self.assertTrue(sink.exists())
Example 13: test_rewrite_rotate_with_user_project
def test_rewrite_rotate_with_user_project(self):
    BLOB_NAME = 'rotating-keys'
    file_data = self.FILES['simple']

    new_bucket_name = 'rewrite-rotate-up' + unique_resource_id('-')
    created = Config.CLIENT.create_bucket(
        new_bucket_name, requester_pays=True)
    try:
        with_user_project = Config.CLIENT.bucket(
            new_bucket_name, user_project=USER_PROJECT)

        SOURCE_KEY = os.urandom(32)
        source = with_user_project.blob(
            BLOB_NAME, encryption_key=SOURCE_KEY)
        source.upload_from_filename(file_data['path'])
        source_data = source.download_as_string()

        DEST_KEY = os.urandom(32)
        dest = with_user_project.blob(BLOB_NAME, encryption_key=DEST_KEY)
        token, rewritten, total = dest.rewrite(source)

        self.assertEqual(token, None)
        self.assertEqual(rewritten, len(source_data))
        self.assertEqual(total, len(source_data))

        self.assertEqual(dest.download_as_string(), source_data)
    finally:
        retry_429(created.delete)(force=True)
Example 14: setUpModule
def setUpModule():
    Config.CLIENT = storage.Client()
    bucket_name = 'new' + unique_resource_id()
    # In the **very** rare case the bucket name is reserved, this
    # fails with a ConnectionError.
    Config.TEST_BUCKET = Config.CLIENT.bucket(bucket_name)
    retry_429(Config.TEST_BUCKET.create)()
Example 15: test_copy_existing_file_with_user_project
def test_copy_existing_file_with_user_project(self):
    new_bucket_name = 'copy-w-requester-pays' + unique_resource_id('-')
    created = Config.CLIENT.create_bucket(
        new_bucket_name, requester_pays=True)
    self.case_buckets_to_delete.append(new_bucket_name)
    self.assertEqual(created.name, new_bucket_name)
    self.assertTrue(created.requester_pays)

    to_delete = []
    blob = storage.Blob('simple', bucket=created)
    blob.upload_from_string(b'DEADBEEF')
    to_delete.append(blob)
    try:
        with_user_project = Config.CLIENT.bucket(
            new_bucket_name, user_project=USER_PROJECT)

        new_blob = retry_bad_copy(with_user_project.copy_blob)(
            blob, with_user_project, 'simple-copy')
        to_delete.append(new_blob)

        base_contents = blob.download_as_string()
        copied_contents = new_blob.download_as_string()
        self.assertEqual(base_contents, copied_contents)
    finally:
        for blob in to_delete:
            retry_429(blob.delete)()