This article collects typical usage examples of the Python class cassandra.pool.Host. If you are wondering what the Host class is for, how to construct one, or what real calls against it look like, the curated examples below should help.
The following sections present 10 code examples of the Host class, listed in their default order of popularity.
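Before the examples, here is a minimal sketch of the pattern they all share: construct a Host with an address and a conviction-policy factory, attach datacenter/rack metadata, and hand it to a load-balancing policy. This is an illustration against the older driver API used by these tests, not the canonical way to set up a cluster; the address and the assertion are arbitrary.

from cassandra.pool import Host
from cassandra.policies import (DCAwareRoundRobinPolicy, HostDistance,
                                SimpleConvictionPolicy)

# Build a host the same way the tests below do: address plus a
# conviction-policy factory, then attach datacenter/rack metadata.
host = Host("10.0.0.1", SimpleConvictionPolicy)
host.set_location_info("dc1", "rack1")

# A DC-aware policy classifies hosts by distance once it knows about them.
policy = DCAwareRoundRobinPolicy("dc1")
policy.populate(None, [host])   # the tests pass None or a Mock() for the cluster
assert policy.distance(host) == HostDistance.LOCAL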
Example 1: test_get_distance
def test_get_distance(self):
    """
    Same test as DCAwareRoundRobinPolicyTest.test_get_distance(),
    except a FakeCluster is needed for the metadata variable and
    policy._child_policy is needed to change child policy settings.
    """
    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")
    policy.populate(self.FakeCluster(), [host])

    self.assertEqual(policy.distance(host), HostDistance.LOCAL)

    # used_hosts_per_remote_dc is set to 0, so the remote host is ignored
    remote_host = Host("ip2", SimpleConvictionPolicy)
    remote_host.set_location_info("dc2", "rack1")
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # dc2 isn't registered in the policy's live_hosts dict
    policy._child_policy.used_hosts_per_remote_dc = 1
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # make sure the policy has both dcs registered
    policy.populate(self.FakeCluster(), [host, remote_host])
    self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

    # since used_hosts_per_remote_dc is set to 1, only the first
    # remote host in dc2 will be REMOTE; the rest are IGNORED
    second_remote_host = Host("ip3", SimpleConvictionPolicy)
    second_remote_host.set_location_info("dc2", "rack1")
    policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])

    distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
    self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
Example 2: test_no_remote
def test_no_remote(self):
    hosts = []
    for i in range(4):
        h = Host(i, SimpleConvictionPolicy)
        h.set_location_info("dc1", "rack1")
        hosts.append(h)

    policy = DCAwareRoundRobinPolicy("dc1")
    policy.populate(None, hosts)

    qplan = list(policy.make_query_plan())
    self.assertEqual(sorted(qplan), sorted(hosts))
Example 3: test_non_implemented
def test_non_implemented(self):
    """
    Code coverage for interface-style base class
    """
    policy = LoadBalancingPolicy()
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")

    self.assertRaises(NotImplementedError, policy.distance, host)
    self.assertRaises(NotImplementedError, policy.populate, None, host)
    self.assertRaises(NotImplementedError, policy.make_query_plan)
    self.assertRaises(NotImplementedError, policy.on_up, host)
    self.assertRaises(NotImplementedError, policy.on_down, host)
    self.assertRaises(NotImplementedError, policy.on_add, host)
    self.assertRaises(NotImplementedError, policy.on_remove, host)
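Example 3 treats LoadBalancingPolicy as an interface: every method raises NotImplementedError until a subclass fills it in. As a hedged illustration only (not part of the driver, and ignoring concerns such as thread safety that a production policy would need), a trivial subclass covering the same seven methods might look like this:

from cassandra.policies import HostDistance, LoadBalancingPolicy


class EveryHostIsLocalPolicy(LoadBalancingPolicy):
    """Toy policy: treat every known host as LOCAL and try them in insertion order."""

    def __init__(self):
        super(EveryHostIsLocalPolicy, self).__init__()
        self._hosts = []

    def populate(self, cluster, hosts):
        self._hosts = list(hosts)

    def distance(self, host):
        return HostDistance.LOCAL

    def make_query_plan(self, working_keyspace=None, query=None):
        # Snapshot the list so concurrent on_up/on_down calls don't break iteration.
        return iter(list(self._hosts))

    def on_up(self, host):
        if host not in self._hosts:
            self._hosts.append(host)

    def on_down(self, host):
        if host in self._hosts:
            self._hosts.remove(host)

    on_add = on_up
    on_remove = on_down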
Example 4: test_version_parsing
def test_version_parsing(self):
    host = Host('127.0.0.1', SimpleConvictionPolicy)

    host.set_version("1.0.0")
    self.assertEqual((1, 0, 0), host.version)

    host.set_version("1.0")
    self.assertEqual((1, 0, 0), host.version)

    host.set_version("1.0.0-beta1")
    self.assertEqual((1, 0, 0, 'beta1'), host.version)

    host.set_version("1.0-SNAPSHOT")
    self.assertEqual((1, 0, 0, 'SNAPSHOT'), host.version)
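The payoff of parsing the version string into a tuple is that version gating reduces to ordinary tuple comparison. A short hedged sketch, assuming a host whose version has been set as in the test above; the feature-toggle helper is a hypothetical placeholder, not a driver API:

host.set_version("2.1.3")
if host.version >= (2, 0, 0):      # tuples compare element-wise
    enable_v2_only_behavior()      # hypothetical helper for illustration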
Example 5: test_wait_for_schema_agreement_rpc_lookup
def test_wait_for_schema_agreement_rpc_lookup(self):
    """
    If the rpc_address is 0.0.0.0, the "peer" column should be used instead.
    """
    self.connection.peer_results[1].append(
        ["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"]]
    )
    host = Host("0.0.0.0", SimpleConvictionPolicy)
    self.cluster.metadata.hosts[PEER_IP] = host
    host.is_up = False

    # even though the new host has a different schema version, it's
    # marked as down, so the control connection shouldn't care
    self.assertTrue(self.control_connection.wait_for_schema_agreement())
    self.assertEqual(self.time.clock, 0)

    # but once we mark it up, the control connection will care
    host.is_up = True
    self.assertFalse(self.control_connection.wait_for_schema_agreement())
    self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait)
Example 6: test_no_live_nodes
def test_no_live_nodes(self):
    """
    Ensure query plan for a downed cluster will execute without errors
    """
    hosts = []
    for i in range(4):
        h = Host(i, SimpleConvictionPolicy)
        h.set_location_info("dc1", "rack1")
        hosts.append(h)

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(Mock(), hosts)

    for host in hosts:
        policy.on_down(host)

    qplan = list(policy.make_query_plan())
    self.assertEqual(qplan, [])
Example 7: test_status_updates
def test_status_updates(self):
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(None, hosts)
    policy.on_down(hosts[0])
    policy.on_remove(hosts[2])

    new_local_host = Host(4, SimpleConvictionPolicy)
    new_local_host.set_location_info("dc1", "rack1")
    policy.on_up(new_local_host)

    new_remote_host = Host(5, SimpleConvictionPolicy)
    new_remote_host.set_location_info("dc9000", "rack1")
    policy.on_add(new_remote_host)

    # we now have two local hosts and two remote hosts in separate dcs
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
    self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))

    # since we have hosts in dc9000, the distance shouldn't be IGNORED
    self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
Example 8: test_nts_token_performance
def test_nts_token_performance(self):
    """
    Ensure that when rf exceeds the number of nodes available, we don't
    needlessly iterate trying to construct tokens for nodes that don't exist.

    @since 3.7
    @jira_ticket PYTHON-379
    @expected_result timing with rf=1500 should be the same/similar to rf=3 if we have 3 nodes
    @test_category metadata
    """
    token_to_host_owner = {}
    ring = []
    dc1hostnum = 3
    current_token = 0
    vnodes_per_host = 500

    for i in range(dc1hostnum):
        host = Host('dc1.{0}'.format(i), SimpleConvictionPolicy)
        host.set_location_info('dc1', "rack1")
        for vnode_num in range(vnodes_per_host):
            md5_token = MD5Token(current_token + vnode_num)
            token_to_host_owner[md5_token] = host
            ring.append(md5_token)
        current_token += 1000

    nts = NetworkTopologyStrategy({'dc1': 3})
    start_time = timeit.default_timer()
    nts.make_token_replica_map(token_to_host_owner, ring)
    elapsed_base = timeit.default_timer() - start_time

    nts = NetworkTopologyStrategy({'dc1': 1500})
    start_time = timeit.default_timer()
    nts.make_token_replica_map(token_to_host_owner, ring)
    elapsed_bad = timeit.default_timer() - start_time

    difference = elapsed_bad - elapsed_base
    self.assertTrue(difference < 1 and difference > -1)
Example 9: test_modification_during_generation
def test_modification_during_generation(self):
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=3)
    policy.populate(Mock(), hosts)

    # The general concept here is to change the internal state of the
    # policy during plan generation. In this case we use a grey-box
    # approach that changes specific things during known phases of the
    # generator.

    new_host = Host(4, SimpleConvictionPolicy)
    new_host.set_location_info("dc1", "rack1")

    # new local before iteration
    plan = policy.make_query_plan()
    policy.on_up(new_host)
    # local list is not bound yet, so we get to see that one
    self.assertEqual(len(list(plan)), 3 + 2)

    # remove local before iteration
    plan = policy.make_query_plan()
    policy.on_down(new_host)
    # local list is not bound yet, so we don't see it
    self.assertEqual(len(list(plan)), 2 + 2)

    # new local after starting iteration
    plan = policy.make_query_plan()
    next(plan)
    policy.on_up(new_host)
    # local list is now bound, and one consumed, so we only see the other original
    self.assertEqual(len(list(plan)), 1 + 2)

    # remove local after traversing available
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(new_host)
    # we should be past the local list
    self.assertEqual(len(list(plan)), 0 + 2)

    # REMOTES CHANGE
    new_host.set_location_info("dc2", "rack1")

    # new remote after traversing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_up(new_host)
    # list is updated before we get to it
    self.assertEqual(len(list(plan)), 0 + 3)

    # remove remote after traversing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(new_host)
    # list is updated before we get to it
    self.assertEqual(len(list(plan)), 0 + 2)

    # new remote after traversing local, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_up(new_host)
    # slice is already made, and we've consumed one
    self.assertEqual(len(list(plan)), 0 + 1)

    # remove remote after traversing local, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(new_host)
    # slice is created with all present, and we've consumed one
    self.assertEqual(len(list(plan)), 0 + 2)

    # local DC disappears after finishing it, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(hosts[0])
    policy.on_down(hosts[1])
    # dict traversal starts as normal
    self.assertEqual(len(list(plan)), 0 + 2)
    policy.on_up(hosts[0])
    policy.on_up(hosts[1])

    # PYTHON-297 addresses the following cases, where DCs come and go
    # during generation

    # local DC disappears after finishing it, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(hosts[0])
    policy.on_down(hosts[1])
    # dict traversal has begun and consumed one
    # ......... the rest of this example is omitted here .........
Example 10: test_nts_make_token_replica_map_multi_rack
def test_nts_make_token_replica_map_multi_rack(self):
    token_to_host_owner = {}

    # (A) not enough distinct racks, first skipped is used
    dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
    dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
    dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
    dc1_4 = Host('dc1.4', SimpleConvictionPolicy)
    dc1_1.set_location_info('dc1', 'rack1')
    dc1_2.set_location_info('dc1', 'rack1')
    dc1_3.set_location_info('dc1', 'rack2')
    dc1_4.set_location_info('dc1', 'rack2')
    token_to_host_owner[MD5Token(0)] = dc1_1
    token_to_host_owner[MD5Token(100)] = dc1_2
    token_to_host_owner[MD5Token(200)] = dc1_3
    token_to_host_owner[MD5Token(300)] = dc1_4

    # (B) distinct racks, but not contiguous
    dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
    dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
    dc2_3 = Host('dc2.3', SimpleConvictionPolicy)
    dc2_1.set_location_info('dc2', 'rack1')
    dc2_2.set_location_info('dc2', 'rack1')
    dc2_3.set_location_info('dc2', 'rack2')
    token_to_host_owner[MD5Token(1)] = dc2_1
    token_to_host_owner[MD5Token(101)] = dc2_2
    token_to_host_owner[MD5Token(201)] = dc2_3

    ring = [MD5Token(0),
            MD5Token(1),
            MD5Token(100),
            MD5Token(101),
            MD5Token(200),
            MD5Token(201),
            MD5Token(300)]

    nts = NetworkTopologyStrategy({'dc1': 3, 'dc2': 2})
    replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

    token_replicas = replica_map[MD5Token(0)]
    self.assertItemsEqual(token_replicas, (dc1_1, dc1_2, dc1_3, dc2_1, dc2_3))