This article collects typical usage examples of the pymongo.topology.Topology class in Python, answering the usual questions: what is the Topology class for, and how is it used? The hand-picked examples below should be a helpful starting point.
The sections that follow present 11 code examples of the Topology class, sorted by popularity by default.
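Before the examples, here is a minimal sketch of how a Topology is typically driven. Note that Topology and TopologySettings live in internal modules (pymongo.topology, pymongo.settings), so import paths and signatures may shift between PyMongo versions; this assumes PyMongo 3.x and a mongod listening on localhost:27017:

from pymongo.read_preferences import ReadPreference
from pymongo.settings import TopologySettings
from pymongo.topology import Topology

# Seed the topology, start background monitoring, then select a server.
settings = TopologySettings(seeds=[('localhost', 27017)])
topology = Topology(settings)
topology.open()  # Starts a monitor for each seed.
server = topology.select_server(ReadPreference.PRIMARY)
print(server.description.address)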
Example 1: test_discover_set_name_from_primary
def test_discover_set_name_from_primary(self):
    # Discovering a replica set without the setName supplied by the user
    # is not yet supported by MongoClient, but Topology can do it.
    topology_settings = SetNameDiscoverySettings(
        seeds=[address],
        pool_class=MockPool,
        monitor_class=MockMonitor)

    t = Topology(topology_settings)
    self.assertEqual(t.description.replica_set_name, None)
    self.assertEqual(t.description.topology_type,
                     TOPOLOGY_TYPE.ReplicaSetNoPrimary)
    t.open()
    got_ismaster(t, address, {
        'ok': 1,
        'ismaster': True,
        'setName': 'rs',
        'hosts': ['a']})

    self.assertEqual(t.description.replica_set_name, 'rs')
    self.assertEqual(t.description.topology_type,
                     TOPOLOGY_TYPE.ReplicaSetWithPrimary)

    # Another response from the primary. Tests the code that processes
    # primary response when topology type is already ReplicaSetWithPrimary.
    got_ismaster(t, address, {
        'ok': 1,
        'ismaster': True,
        'setName': 'rs',
        'hosts': ['a']})

    # No change.
    self.assertEqual(t.description.replica_set_name, 'rs')
    self.assertEqual(t.description.topology_type,
                     TOPOLOGY_TYPE.ReplicaSetWithPrimary)
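The got_ismaster helper belongs to PyMongo's test suite and is not shown on this page. Judging from how it is called above, it likely wraps the raw ismaster document in a ServerDescription and publishes it to the topology, simulating a monitor check; IsMaster and ServerDescription are internal PyMongo 3.x classes. A sketch under those assumptions:

from pymongo.ismaster import IsMaster
from pymongo.server_description import ServerDescription

def got_ismaster(topology, server_address, ismaster_response):
    # Pretend the server at server_address just answered an ismaster
    # check, and let the topology update its view accordingly.
    server_description = ServerDescription(
        server_address, IsMaster(ismaster_response), 0)
    topology.on_change(server_description)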
Example 2: create_mock_topology
def create_mock_topology(
        seeds=None,
        replica_set_name=None,
        monitor_class=MockMonitor):
    partitioned_seeds = list(imap(common.partition_node, seeds or ['a']))
    topology_settings = TopologySettings(
        partitioned_seeds,
        replica_set_name=replica_set_name,
        pool_class=MockPool,
        monitor_class=monitor_class)

    t = Topology(topology_settings)
    t.open()
    return t
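The factory leans on pymongo.common.partition_node, which turns a seed string into an (host, port) tuple and supplies the default port 27017 when none is given:

from pymongo import common

common.partition_node('a')        # -> ('a', 27017)
common.partition_node('b:27018')  # -> ('b', 27018)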
Example 3: run_scenario
def run_scenario(self):
    if 'heartbeatFrequencyMS' in scenario_def:
        frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
    else:
        frequency = HEARTBEAT_FREQUENCY

    # Initialize topologies.
    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])
    topology = Topology(
        TopologySettings(seeds=seeds,
                         monitor_class=MockMonitor,
                         pool_class=MockPool,
                         heartbeat_frequency=frequency))

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        topology.on_change(server_description)

    # Create server selector.
    # Make first letter lowercase to match read_pref's modes.
    pref_def = scenario_def['read_preference']
    mode_string = pref_def.get('mode', 'primary')
    mode_string = mode_string[:1].lower() + mode_string[1:]
    mode = read_preferences.read_pref_mode_from_name(mode_string)
    max_staleness = pref_def.get('maxStalenessMS', 0) / 1000.0
    tag_sets = pref_def.get('tag_sets')

    if scenario_def.get('error'):
        with self.assertRaises(ConfigurationError):
            # Error can be raised when making Read Pref or selecting.
            pref = read_preferences.make_read_preference(
                mode, tag_sets=tag_sets, max_staleness=max_staleness)
            topology.select_server(pref)
        return

    expected_addrs = set([
        server['address'] for server in scenario_def['in_latency_window']])

    # Select servers.
    pref = read_preferences.make_read_preference(
        mode, tag_sets=tag_sets, max_staleness=max_staleness)
    if not expected_addrs:
        with self.assertRaises(ConnectionFailure):
            topology.select_servers(pref, server_selection_timeout=0)
        return

    servers = topology.select_servers(pref, server_selection_timeout=0)
    actual_addrs = set(['%s:%d' % s.description.address for s in servers])

    for unexpected in actual_addrs - expected_addrs:
        self.fail("'%s' shouldn't have been selected, but was" % unexpected)

    for unselected in expected_addrs - actual_addrs:
        self.fail("'%s' should have been selected, but wasn't" % unselected)
Example 4: create_mock_topology
def create_mock_topology(uri, monitor_class=MockMonitor):
    # Some tests in the spec include URIs like mongodb://A/?connect=direct,
    # but PyMongo considers any single-seed URI with no setName to be "direct".
    parsed_uri = parse_uri(uri.replace('connect=direct', ''))
    replica_set_name = None
    if 'replicaset' in parsed_uri['options']:
        replica_set_name = parsed_uri['options']['replicaset']

    topology_settings = TopologySettings(
        parsed_uri['nodelist'],
        replica_set_name=replica_set_name,
        pool_class=MockPool,
        monitor_class=monitor_class)

    c = Topology(topology_settings)
    c.open()
    return c
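A hypothetical usage of this factory: a multi-seed URI carrying replicaSet yields a topology that starts out as ReplicaSetNoPrimary, whereas a single seed with no setName would be treated as a direct connection:

t = create_mock_topology('mongodb://a,b/?replicaSet=rs')
assert t.description.topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary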
Example 5: test_timeout_configuration
def test_timeout_configuration(self):
    pool_options = PoolOptions(connect_timeout=1, socket_timeout=2)
    topology_settings = TopologySettings(pool_options=pool_options)
    t = Topology(topology_settings=topology_settings)
    t.open()

    # Get the default server.
    server = t.get_server_by_address(('localhost', 27017))

    # The pool for application operations obeys our settings.
    self.assertEqual(1, server._pool.opts.connect_timeout)
    self.assertEqual(2, server._pool.opts.socket_timeout)

    # The pool for monitoring operations uses our connect_timeout as both
    # its connect_timeout and its socket_timeout.
    monitor = server._monitor
    self.assertEqual(1, monitor._pool.opts.connect_timeout)
    self.assertEqual(1, monitor._pool.opts.socket_timeout)

    # The monitor, not its pool, is responsible for calling ismaster.
    self.assertFalse(monitor._pool.handshake)
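For comparison, application code normally sets these two timeouts through MongoClient's public options rather than building PoolOptions directly; note that the public options take milliseconds, while the PoolOptions values above are seconds:

from pymongo import MongoClient

# Public equivalent of PoolOptions(connect_timeout=1, socket_timeout=2):
client = MongoClient('localhost', 27017,
                     connectTimeoutMS=1000,
                     socketTimeoutMS=2000)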
Example 6: test_discover_set_name_from_secondary
def test_discover_set_name_from_secondary(self):
    # Discovering a replica set without the setName supplied by the user
    # is not yet supported by MongoClient, but Topology can do it.
    topology_settings = SetNameDiscoverySettings(
        seeds=[address],
        pool_class=MockPool,
        monitor_class=MockMonitor)

    t = Topology(topology_settings)
    self.assertEqual(t.description.replica_set_name, None)
    self.assertEqual(t.description.topology_type,
                     TOPOLOGY_TYPE.ReplicaSetNoPrimary)
    t.open()
    got_ismaster(t, address, {
        'ok': 1,
        'ismaster': False,
        'secondary': True,
        'setName': 'rs',
        'hosts': ['a']})

    self.assertEqual(t.description.replica_set_name, 'rs')
    self.assertEqual(t.description.topology_type,
                     TOPOLOGY_TYPE.ReplicaSetNoPrimary)
Example 7: test_latency_threshold_application
def test_latency_threshold_application(self):
    selector = SelectionStoreSelector()

    scenario_def = {
        'topology_description': {
            'type': 'ReplicaSetWithPrimary', 'servers': [
                {'address': 'b:27017',
                 'avg_rtt_ms': 10000,
                 'type': 'RSSecondary',
                 'tag': {}},
                {'address': 'c:27017',
                 'avg_rtt_ms': 20000,
                 'type': 'RSSecondary',
                 'tag': {}},
                {'address': 'a:27017',
                 'avg_rtt_ms': 30000,
                 'type': 'RSPrimary',
                 'tag': {}},
            ]}}

    # Create & populate Topology such that all but one server is too slow.
    rtt_times = [srv['avg_rtt_ms'] for srv in
                 scenario_def['topology_description']['servers']]
    min_rtt_idx = rtt_times.index(min(rtt_times))
    seeds, hosts = get_addresses(
        scenario_def["topology_description"]["servers"])
    settings = get_topology_settings_dict(
        heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds,
        server_selector=selector)
    topology = Topology(TopologySettings(**settings))
    topology.open()
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        topology.on_change(server_description)

    # Invoke server selection and assert no filtering based on latency
    # prior to custom server selection logic kicking in.
    server = topology.select_server(ReadPreference.NEAREST)
    self.assertEqual(
        len(selector.selection),
        len(topology.description.server_descriptions()))

    # Ensure proper filtering based on latency after custom selection.
    self.assertEqual(
        server.description.address, seeds[min_rtt_idx])
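SelectionStoreSelector is defined in the test module rather than on this page. Consistent with the assertions above, it is presumably a pass-through server selector that records what it was handed; a hypothetical sketch:

class SelectionStoreSelector(object):
    """Pass-through selector that remembers its last input.

    Hypothetical reconstruction based on how the test inspects
    selector.selection after server selection runs.
    """
    def __init__(self):
        self.selection = None

    def __call__(self, selection):
        self.selection = selection
        return selection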
Example 8: test_server_selector_bypassed
def test_server_selector_bypassed(self):
    selector = CallCountSelector()

    scenario_def = {
        'topology_description': {
            'type': 'ReplicaSetNoPrimary', 'servers': [
                {'address': 'b:27017',
                 'avg_rtt_ms': 10000,
                 'type': 'RSSecondary',
                 'tag': {}},
                {'address': 'c:27017',
                 'avg_rtt_ms': 20000,
                 'type': 'RSSecondary',
                 'tag': {}},
                {'address': 'a:27017',
                 'avg_rtt_ms': 30000,
                 'type': 'RSSecondary',
                 'tag': {}},
            ]}}

    # Create & populate Topology such that no server is writeable.
    seeds, hosts = get_addresses(
        scenario_def["topology_description"]["servers"])
    settings = get_topology_settings_dict(
        heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds,
        server_selector=selector)
    topology = Topology(TopologySettings(**settings))
    topology.open()
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        topology.on_change(server_description)

    # Invoke server selection and assert no calls to our custom selector.
    with self.assertRaisesRegex(
            ServerSelectionTimeoutError, 'No primary available for writes'):
        topology.select_server(
            writable_server_selector, server_selection_timeout=0.1)
    self.assertEqual(selector.call_count, 0)
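CallCountSelector is likewise a harness class; the final assertion only makes sense if it counts its invocations, so a plausible sketch is:

class CallCountSelector(object):
    """Selector that counts invocations; hypothetical reconstruction.

    The test expects call_count to stay 0 because custom selectors are
    bypassed when the operation requires a writable server.
    """
    def __init__(self):
        self.call_count = 0

    def __call__(self, selection):
        self.call_count += 1
        return selection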
Example 9: __init__
#......... some code omitted here .........
      client['__my_database__']

    Not::

      client.__my_database__
    """
    if host is None:
        host = self.HOST
    if isinstance(host, string_type):
        host = [host]
    if port is None:
        port = self.PORT
    if not isinstance(port, int):
        raise TypeError("port must be an instance of int")

    seeds = set()
    username = None
    password = None
    dbase = None
    opts = {}
    for entity in host:
        if "://" in entity:
            if entity.startswith("mongodb://"):
                res = uri_parser.parse_uri(entity, port, False)
                seeds.update(res["nodelist"])
                username = res["username"] or username
                password = res["password"] or password
                dbase = res["database"] or dbase
                opts = res["options"]
            else:
                idx = entity.find("://")
                raise InvalidURI("Invalid URI scheme: "
                                 "%s" % (entity[:idx],))
        else:
            seeds.update(uri_parser.split_hosts(entity, port))
    if not seeds:
        raise ConfigurationError("need to specify at least one host")

    # _pool_class, _monitor_class, and _condition_class are for deep
    # customization of PyMongo, e.g. Motor.
    pool_class = kwargs.pop('_pool_class', None)
    monitor_class = kwargs.pop('_monitor_class', None)
    condition_class = kwargs.pop('_condition_class', None)

    kwargs['document_class'] = document_class
    kwargs['tz_aware'] = tz_aware
    opts.update(kwargs)
    self.__options = options = ClientOptions(
        username, password, dbase, opts)

    self.__default_database_name = dbase
    self.__lock = threading.Lock()
    self.__cursor_manager = CursorManager(self)
    self.__kill_cursors_queue = []

    # Cache of existing indexes used by ensure_index ops.
    self.__index_cache = {}

    super(MongoClient, self).__init__(options.codec_options,
                                      options.read_preference,
                                      options.write_concern)

    self.__all_credentials = {}
    creds = options.credentials
    if creds:
        self._cache_credentials(creds.source, creds)

    self._topology_settings = TopologySettings(
        seeds=seeds,
        replica_set_name=options.replica_set_name,
        pool_class=pool_class,
        pool_options=options.pool_options,
        monitor_class=monitor_class,
        condition_class=condition_class,
        local_threshold_ms=options.local_threshold_ms,
        server_selection_timeout=options.server_selection_timeout)

    self._topology = Topology(self._topology_settings)
    if connect:
        self._topology.open()

    def target():
        client = self_ref()
        if client is None:
            return False  # Stop the executor.
        MongoClient._process_kill_cursors_queue(client)
        return True

    executor = periodic_executor.PeriodicExecutor(
        condition_class=self._topology_settings.condition_class,
        interval=common.KILL_CURSOR_FREQUENCY,
        min_interval=0,
        target=target)

    # We strongly reference the executor and it weakly references us via
    # this closure. When the client is freed, stop the executor soon.
    self_ref = weakref.ref(self, executor.close)
    self._kill_cursors_executor = executor
    executor.open()
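The closing lines illustrate a pattern worth calling out: the client holds a strong reference to its background executor, while the executor reaches back only through a weak reference, so a client with no other references can still be garbage collected even while periodic work is scheduled. A minimal, PyMongo-independent sketch of the same idea, using a plain threading.Timer in place of PeriodicExecutor:

import threading
import weakref

class Owner(object):
    def __init__(self):
        def work():
            owner = self_ref()
            if owner is None:
                return  # Owner was garbage collected: stop rescheduling.
            # ... periodic housekeeping on `owner` would go here ...
            owner._timer = threading.Timer(60.0, work)
            owner._timer.daemon = True
            owner._timer.start()

        # The closure captures only the weak reference, so the timer
        # thread never keeps `self` alive on its own.
        self_ref = weakref.ref(self)
        self._timer = threading.Timer(60.0, work)
        self._timer.daemon = True
        self._timer.start()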
Example 10: run_scenario
def run_scenario(self):
    # Initialize topologies.
    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])

    # "Eligible servers" is defined in the server selection spec as
    # the set of servers matching both the ReadPreference's mode
    # and tag sets.
    top_latency = Topology(
        TopologySettings(seeds=seeds, monitor_class=MockMonitor,
                         pool_class=MockPool))

    # "In latency window" is defined in the server selection
    # spec as the subset of suitable_servers that falls within the
    # allowable latency window.
    top_suitable = Topology(
        TopologySettings(seeds=seeds, local_threshold_ms=1000000,
                         monitor_class=MockMonitor,
                         pool_class=MockPool))

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        top_suitable.on_change(server_description)
        top_latency.on_change(server_description)

    # Create server selector.
    if scenario_def["operation"] == "write":
        instance = writable_server_selector
    else:
        # Make first letter lowercase to match read_pref's modes.
        mode_string = scenario_def['read_preference']['mode']
        if mode_string:
            mode_string = mode_string[:1].lower() + mode_string[1:]
        mode = read_preferences.read_pref_mode_from_name(mode_string)
        tag_sets = None
        if scenario_def['read_preference']['tag_sets'][0]:
            tag_sets = scenario_def['read_preference']['tag_sets']
        instance = read_preferences.make_read_preference(mode, tag_sets)

    # Select servers.
    if not scenario_def['suitable_servers']:
        self.assertRaises(AutoReconnect, top_suitable.select_server,
                          instance,
                          server_selection_timeout=0)
        return

    if not scenario_def['in_latency_window']:
        self.assertRaises(AutoReconnect, top_latency.select_server,
                          instance,
                          server_selection_timeout=0)
        return

    actual_suitable_s = top_suitable.select_servers(
        instance, server_selection_timeout=0)
    actual_latency_s = top_latency.select_servers(
        instance, server_selection_timeout=0)

    expected_suitable_servers = {}
    for server in scenario_def['suitable_servers']:
        server_description = make_server_description(server, hosts)
        expected_suitable_servers[server['address']] = server_description

    actual_suitable_servers = {}
    for s in actual_suitable_s:
        actual_suitable_servers["%s:%d" % (s.description.address[0],
                                           s.description.address[1])] = s.description

    self.assertEqual(len(actual_suitable_servers),
                     len(expected_suitable_servers))
    for k, actual in actual_suitable_servers.items():
        expected = expected_suitable_servers[k]
        self.assertEqual(expected.address, actual.address)
        self.assertEqual(expected.server_type, actual.server_type)
        self.assertEqual(expected.round_trip_time, actual.round_trip_time)
        self.assertEqual(expected.tags, actual.tags)
        self.assertEqual(expected.all_hosts, actual.all_hosts)

    expected_latency_servers = {}
    for server in scenario_def['in_latency_window']:
        server_description = make_server_description(server, hosts)
        expected_latency_servers[server['address']] = server_description

    actual_latency_servers = {}
    for s in actual_latency_s:
        actual_latency_servers["%s:%d" %
                               (s.description.address[0],
                                s.description.address[1])] = s.description

    self.assertEqual(len(actual_latency_servers),
                     len(expected_latency_servers))
    for k, actual in actual_latency_servers.items():
        expected = expected_latency_servers[k]
        self.assertEqual(expected.address, actual.address)
        self.assertEqual(expected.server_type, actual.server_type)
        self.assertEqual(expected.round_trip_time, actual.round_trip_time)
        self.assertEqual(expected.tags, actual.tags)
        self.assertEqual(expected.all_hosts, actual.all_hosts)
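The "first letter lowercase" dance exists because spec files spell modes like 'SecondaryPreferred' while PyMongo's mode names start lowercase. A quick sketch of what the lookup resolves to, assuming PyMongo 3.x's read_pref_mode_from_name, which maps a mode name to its integer constant:

from pymongo import read_preferences

mode_string = 'SecondaryPreferred'
mode_string = mode_string[:1].lower() + mode_string[1:]
mode = read_preferences.read_pref_mode_from_name(mode_string)
print(mode)  # 3, i.e. ReadPreference.SECONDARY_PREFERRED.mode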
Example 11: run_scenario
def run_scenario(self):
    # Initialize topologies.
    if 'heartbeatFrequencyMS' in scenario_def:
        frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
    else:
        frequency = HEARTBEAT_FREQUENCY

    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])
    settings = get_topology_settings_dict(
        heartbeat_frequency=frequency,
        seeds=seeds
    )

    # "Eligible servers" is defined in the server selection spec as
    # the set of servers matching both the ReadPreference's mode
    # and tag sets.
    top_latency = Topology(TopologySettings(**settings))
    top_latency.open()

    # "In latency window" is defined in the server selection
    # spec as the subset of suitable_servers that falls within the
    # allowable latency window.
    settings['local_threshold_ms'] = 1000000
    top_suitable = Topology(TopologySettings(**settings))
    top_suitable.open()

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        top_suitable.on_change(server_description)
        top_latency.on_change(server_description)

    # Create server selector.
    if scenario_def.get("operation") == "write":
        pref = writable_server_selector
    else:
        # Make first letter lowercase to match read_pref's modes.
        pref_def = scenario_def['read_preference']
        if scenario_def.get('error'):
            with self.assertRaises((ConfigurationError, ValueError)):
                # Error can be raised when making Read Pref or selecting.
                pref = parse_read_preference(pref_def)
                top_latency.select_server(pref)
            return

        pref = parse_read_preference(pref_def)

    # Select servers.
    if not scenario_def.get('suitable_servers'):
        with self.assertRaises(AutoReconnect):
            top_suitable.select_server(pref, server_selection_timeout=0)
        return

    if not scenario_def['in_latency_window']:
        with self.assertRaises(AutoReconnect):
            top_latency.select_server(pref, server_selection_timeout=0)
        return

    actual_suitable_s = top_suitable.select_servers(
        pref, server_selection_timeout=0)
    actual_latency_s = top_latency.select_servers(
        pref, server_selection_timeout=0)

    expected_suitable_servers = {}
    for server in scenario_def['suitable_servers']:
        server_description = make_server_description(server, hosts)
        expected_suitable_servers[server['address']] = server_description

    actual_suitable_servers = {}
    for s in actual_suitable_s:
        actual_suitable_servers["%s:%d" % (s.description.address[0],
                                           s.description.address[1])] = s.description

    self.assertEqual(len(actual_suitable_servers),
                     len(expected_suitable_servers))
    for k, actual in actual_suitable_servers.items():
        expected = expected_suitable_servers[k]
        self.assertEqual(expected.address, actual.address)
        self.assertEqual(expected.server_type, actual.server_type)
        self.assertEqual(expected.round_trip_time, actual.round_trip_time)
        self.assertEqual(expected.tags, actual.tags)
        self.assertEqual(expected.all_hosts, actual.all_hosts)

    expected_latency_servers = {}
    for server in scenario_def['in_latency_window']:
        server_description = make_server_description(server, hosts)
        expected_latency_servers[server['address']] = server_description

    actual_latency_servers = {}
    for s in actual_latency_s:
        actual_latency_servers["%s:%d" %
                               (s.description.address[0],
                                s.description.address[1])] = s.description

    self.assertEqual(len(actual_latency_servers),
                     len(expected_latency_servers))
#......... some code omitted here .........
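parse_read_preference is another harness helper this page truncates away. A plausible reconstruction, combining the mode-normalization step seen in Example 3 with a maxStalenessSeconds option (key names assumed, not confirmed by this page):

from pymongo import read_preferences

def parse_read_preference(pref):
    # Hypothetical reconstruction of the truncated harness helper.
    mode_string = pref.get('mode', 'primary')
    mode_string = mode_string[:1].lower() + mode_string[1:]
    mode = read_preferences.read_pref_mode_from_name(mode_string)
    max_staleness = pref.get('maxStalenessSeconds', -1)
    tag_sets = pref.get('tag_sets')
    return read_preferences.make_read_preference(
        mode, tag_sets=tag_sets, max_staleness=max_staleness)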