This article collects typical code examples of the Python method mongo_connector.oplog_manager.OplogThread.start. If you are unsure what OplogThread.start does, how to call it, or how it is used in practice, the hand-picked examples below should help. You can also explore the containing class, mongo_connector.oplog_manager.OplogThread, for further context.
The following shows 15 code examples of OplogThread.start, sorted by popularity by default.
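Before diving into the examples, here is a minimal sketch of the lifecycle they all share: construct the thread, call start() to begin tailing the oplog on a background thread, and call join() to shut it down. This follows the newer keyword-argument constructor used in Examples 1, 3, and 11 below; the import paths for LockingDict, NamespaceConfig, and the simulator DocManager are assumptions that may differ across mongo-connector versions.

# A minimal, hedged sketch of the typical OplogThread.start lifecycle.
# Import paths and the DocManager below are assumptions; adjust them for
# your mongo-connector version.
from pymongo import MongoClient
from mongo_connector.locking_dict import LockingDict
from mongo_connector.namespace_config import NamespaceConfig
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.doc_managers.doc_manager_simulator import DocManager

client = MongoClient("localhost", 27017)  # must point at a replica-set member
opman = OplogThread(
    primary_client=client,
    doc_managers=(DocManager(),),
    oplog_progress_dict=LockingDict(),
    namespace_config=NamespaceConfig(namespace_set=["test.test"]),
)
opman.start()  # begins tailing the oplog on a background thread
# ... let replication run ...
opman.join()   # signals the thread to stop and waits for it to exit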
Example 1: test_skipped_oplog_entry_updates_checkpoint
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
def test_skipped_oplog_entry_updates_checkpoint(self):
    repl_set = ReplicaSetSingle().start()
    conn = repl_set.client()
    opman = OplogThread(
        primary_client=conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        namespace_config=NamespaceConfig(namespace_set=["test.test"]),
    )
    opman.start()

    # Insert a document into an included collection
    conn["test"]["test"].insert_one({"test": 1})
    last_ts = opman.get_last_oplog_timestamp()
    assert_soon(
        lambda: last_ts == opman.checkpoint,
        "OplogThread never updated checkpoint to non-skipped entry.",
    )
    self.assertEqual(len(opman.doc_managers[0]._search()), 1)

    # Make sure that the oplog thread updates its checkpoint on every
    # oplog entry.
    conn["test"]["ignored"].insert_one({"test": 1})
    last_ts = opman.get_last_oplog_timestamp()
    assert_soon(
        lambda: last_ts == opman.checkpoint,
        "OplogThread never updated checkpoint to skipped entry.",
    )
    opman.join()
    conn.close()
    repl_set.stop()
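A note on the assert_soon calls in Example 1 (and most examples below): the helper comes from mongo-connector's test utilities and polls a predicate until it becomes true or a timeout expires, which is how these tests wait for the background OplogThread to catch up. Its real implementation is not shown on this page; a minimal equivalent, for illustration only, might look like:

import time

def assert_soon(condition, message=None, max_tries=60):
    # Poll the condition roughly once per second; fail if it never holds.
    # (A sketch only -- the real helper lives in mongo-connector's test utils.)
    for _ in range(max_tries):
        if condition():
            return
        time.sleep(1)
    raise AssertionError(message or "condition never became true")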
Example 2: run
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
def run(self):
    """Discovers the mongo cluster and creates a thread for each primary.
    """
    main_conn = MongoClient(
        self.address, tz_aware=self.tz_aware, **self.ssl_kwargs)
    if self.auth_key is not None:
        main_conn['admin'].authenticate(self.auth_username, self.auth_key)
    self.read_oplog_progress()

    conn_type = None
    try:
        main_conn.admin.command("isdbgrid")
    except pymongo.errors.OperationFailure:
        conn_type = "REPLSET"

    if conn_type == "REPLSET":
        # Make sure we are connected to a replica set
        is_master = main_conn.admin.command("isMaster")
        if "setName" not in is_master:
            LOG.error(
                'No replica set at "%s"! A replica set is required '
                'to run mongo-connector. Shutting down...' % self.address
            )
            return

        # Establish a connection to the replica set as a whole
        main_conn.close()
        main_conn = MongoClient(
            self.address, replicaSet=is_master['setName'],
            tz_aware=self.tz_aware, **self.ssl_kwargs)
        if self.auth_key is not None:
            main_conn.admin.authenticate(self.auth_username, self.auth_key)

        # Non-sharded configuration
        oplog = OplogThread(
            main_conn, self.doc_managers, self.oplog_progress,
            **self.kwargs)
        self.shard_set[0] = oplog
        LOG.info('MongoConnector: Starting connection thread %s' %
                 main_conn)
        oplog.start()

        while self.can_run:
            if not self.shard_set[0].running:
                LOG.error("MongoConnector: OplogThread"
                          " %s unexpectedly stopped! Shutting down" %
                          (str(self.shard_set[0])))
                self.oplog_thread_join()
                for dm in self.doc_managers:
                    dm.stop()
                return

            self.write_oplog_progress()
            time.sleep(1)

    else:  # sharded cluster
        while self.can_run is True:
            for shard_doc in main_conn['config']['shards'].find():
                shard_id = shard_doc['_id']
                if shard_id in self.shard_set:
                    if not self.shard_set[shard_id].running:
                        LOG.error("MongoConnector: OplogThread "
                                  "%s unexpectedly stopped! Shutting "
                                  "down" %
                                  (str(self.shard_set[shard_id])))
                        self.oplog_thread_join()
                        for dm in self.doc_managers:
                            dm.stop()
                        return

                    self.write_oplog_progress()
                    time.sleep(1)
                    continue
                try:
                    repl_set, hosts = shard_doc['host'].split('/')
                except ValueError:
                    cause = "The system only uses replica sets!"
                    LOG.exception("MongoConnector: %s", cause)
                    self.oplog_thread_join()
                    for dm in self.doc_managers:
                        dm.stop()
                    return

                shard_conn = MongoClient(
                    hosts, replicaSet=repl_set, tz_aware=self.tz_aware,
                    **self.ssl_kwargs)
                if self.auth_key is not None:
                    shard_conn['admin'].authenticate(
                        self.auth_username, self.auth_key)
                oplog = OplogThread(
                    shard_conn, self.doc_managers, self.oplog_progress,
                    **self.kwargs)
                self.shard_set[shard_id] = oplog
                msg = "Starting connection thread"
                LOG.info("MongoConnector: %s %s" % (msg, shard_conn))
                oplog.start()

    self.oplog_thread_join()
    self.write_oplog_progress()
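The repl_set, hosts = shard_doc['host'].split('/') line above relies on the documented format of the host field in MongoDB's config.shards collection: for a replica-set shard it reads "<setName>/<host1>,<host2>,...". A standalone illustration with made-up values:

# Illustration of parsing the config.shards "host" field (values invented).
shard_doc = {"_id": "demo-set-0",
             "host": "demo-set-0/localhost:27017,localhost:27018"}
repl_set, hosts = shard_doc["host"].split("/")
assert repl_set == "demo-set-0"
assert hosts == "localhost:27017,localhost:27018"
# A standalone shard's host field has no "setName/" prefix, so the two-way
# unpacking raises ValueError -- which run() above treats as fatal.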
Example 3: TestCommandReplication
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestCommandReplication(unittest.TestCase):
    def setUp(self):
        self.repl_set = ReplicaSetSingle().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_progress = LockingDict()
        self.opman = None

    def tearDown(self):
        try:
            if self.opman:
                self.opman.join()
        except RuntimeError:
            pass
        close_client(self.primary_conn)
        self.repl_set.stop()

    def initOplogThread(self, namespace_set=None):
        self.docman = CommandLoggerDocManager()
        namespace_config = NamespaceConfig(namespace_set=namespace_set)
        self.docman.command_helper = CommandHelper(namespace_config)
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(self.docman,),
            oplog_progress_dict=self.oplog_progress,
            namespace_config=namespace_config,
            collection_dump=False
        )
        self.opman.start()

    def test_command_helper(self):
        mapping = {
            'a.x': 'b.x',
            'a.y': 'c.y'
        }
        helper = CommandHelper(NamespaceConfig(
            namespace_set=list(mapping) + ['a.z'], namespace_options=mapping))
        self.assertEqual(set(helper.map_db('a')), set(['a', 'b', 'c']))
        self.assertEqual(helper.map_db('d'), [])
        self.assertEqual(helper.map_namespace('a.x'), 'b.x')
        self.assertEqual(helper.map_namespace('a.z'), 'a.z')
        self.assertEqual(helper.map_namespace('d.x'), None)
        self.assertEqual(helper.map_collection('a', 'x'), ('b', 'x'))
        self.assertEqual(helper.map_collection('a', 'z'), ('a', 'z'))
        self.assertEqual(helper.map_collection('d', 'x'), (None, None))

    def test_create_collection(self):
        self.initOplogThread()
        pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)
        assert_soon(lambda: self.docman.commands)
        command = self.docman.commands[0]
        self.assertEqual(command['create'], 'test')

    def test_create_collection_skipped(self):
        self.initOplogThread(['test.test'])
        pymongo.collection.Collection(
            self.primary_conn['test2'], 'test2', create=True)
        pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)
        assert_soon(lambda: self.docman.commands)
        self.assertEqual(len(self.docman.commands), 1)
        command = self.docman.commands[0]
        self.assertEqual(command['create'], 'test')

    def test_drop_collection(self):
        self.initOplogThread()
        coll = pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)
        coll.drop()
        assert_soon(lambda: len(self.docman.commands) == 2)
        self.assertEqual(self.docman.commands[1], {'drop': 'test'})

    def test_drop_database(self):
        self.initOplogThread()
        pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)
        self.primary_conn.drop_database('test')
        assert_soon(lambda: len(self.docman.commands) == 2)
        self.assertEqual(self.docman.commands[1], {'dropDatabase': 1})

    def test_rename_collection(self):
        self.initOplogThread()
        coll = pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)
        coll.rename('test2')
        assert_soon(lambda: len(self.docman.commands) == 2)
        self.assertEqual(
            self.docman.commands[1].get('renameCollection'),
            'test.test')
        self.assertEqual(
            self.docman.commands[1].get('to'),
            'test.test2')
Example 4: TestRollbacks
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestRollbacks(unittest.TestCase):
    def tearDown(self):
        close_client(self.primary_conn)
        close_client(self.secondary_conn)
        self.repl_set.stop()

    def setUp(self):
        # Create a new oplog progress file
        try:
            os.unlink("oplog.timestamp")
        except OSError:
            pass
        open("oplog.timestamp", "w").close()

        # Start a replica set
        self.repl_set = ReplicaSet().start()
        # Connection to the replica set as a whole
        self.main_conn = self.repl_set.client()
        # Connection to the primary specifically
        self.primary_conn = self.repl_set.primary.client()
        # Connection to the secondary specifically
        self.secondary_conn = self.repl_set.secondary.client(
            read_preference=ReadPreference.SECONDARY_PREFERRED)

        # Wipe any test data
        self.main_conn["test"]["mc"].drop()

        # Oplog thread
        doc_manager = DocManager()
        oplog_progress = LockingDict()
        self.opman = OplogThread(
            primary_client=self.main_conn,
            doc_managers=(doc_manager,),
            oplog_progress_dict=oplog_progress,
            ns_set=["test.mc"]
        )

    def test_single_target(self):
        """Test with a single replication target"""
        self.opman.start()

        # Insert first document with primary up
        self.main_conn["test"]["mc"].insert_one({"i": 0})
        self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)

        # Make sure the insert is replicated
        secondary = self.secondary_conn
        assert_soon(lambda: secondary["test"]["mc"].count() == 1,
                    "first write didn't replicate to secondary")

        # Kill the primary
        self.repl_set.primary.stop(destroy=False)

        # Wait for the secondary to be promoted
        assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"])

        # Insert another document. This will be rolled back later
        retry_until_ok(self.main_conn["test"]["mc"].insert_one, {"i": 1})
        self.assertEqual(secondary["test"]["mc"].count(), 2)

        # Wait for replication to doc manager
        assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 2,
                    "not all writes were replicated to doc manager")

        # Kill the new primary
        self.repl_set.secondary.stop(destroy=False)

        # Start both servers back up
        self.repl_set.primary.start()
        primary_admin = self.primary_conn["admin"]
        assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
                    "restarted primary never resumed primary status")
        self.repl_set.secondary.start()
        assert_soon(lambda: retry_until_ok(secondary.admin.command,
                                           'replSetGetStatus')['myState'] == 2,
                    "restarted secondary never resumed secondary status")
        assert_soon(lambda:
                    retry_until_ok(self.main_conn.test.mc.find().count) > 0,
                    "documents not found after primary/secondary restarted")

        # Only first document should exist in MongoDB
        self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
        self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)

        # Same case should hold for the doc manager
        doc_manager = self.opman.doc_managers[0]
        assert_soon(lambda: len(doc_manager._search()) == 1,
                    'documents never rolled back in doc manager.')
        self.assertEqual(doc_manager._search()[0]["i"], 0)

        # cleanup
        self.opman.join()

    def test_many_targets(self):
        """Test with several replication targets"""
        # OplogThread has multiple doc managers
        doc_managers = [DocManager(), DocManager(), DocManager()]
        # ... the rest of this code is omitted ...
Example 5: TestOplogManagerSharded
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestOplogManagerSharded(unittest.TestCase):
    """Defines all test cases for OplogThreads running on a sharded
    cluster
    """

    def setUp(self):
        """Initialize the cluster:

        Clean out the databases used by the tests
        Make connections to mongos, mongods
        Create and shard test collections
        Create OplogThreads
        """
        # Start the cluster with a mongos on port 27217
        self.mongos_p = start_cluster()

        # Connection to mongos
        mongos_address = '%s:%d' % (mongo_host, self.mongos_p)
        self.mongos_conn = MongoClient(mongos_address)

        # Connections to the shards
        shard1_ports = get_shard(self.mongos_p, 0)
        shard2_ports = get_shard(self.mongos_p, 1)
        self.shard1_prim_p = shard1_ports['primary']
        self.shard1_scnd_p = shard1_ports['secondaries'][0]
        self.shard2_prim_p = shard2_ports['primary']
        self.shard2_scnd_p = shard2_ports['secondaries'][0]
        self.shard1_conn = MongoClient('%s:%d'
                                       % (mongo_host, self.shard1_prim_p),
                                       replicaSet="demo-set-0")
        self.shard2_conn = MongoClient('%s:%d'
                                       % (mongo_host, self.shard2_prim_p),
                                       replicaSet="demo-set-1")
        self.shard1_secondary_conn = MongoClient(
            '%s:%d' % (mongo_host, self.shard1_scnd_p),
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )
        self.shard2_secondary_conn = MongoClient(
            '%s:%d' % (mongo_host, self.shard2_scnd_p),
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )

        # Wipe any test data
        self.mongos_conn["test"]["mcsharded"].drop()

        # Create and shard the collection test.mcsharded on the "i" field
        self.mongos_conn["test"]["mcsharded"].ensure_index("i")
        self.mongos_conn.admin.command("enableSharding", "test")
        self.mongos_conn.admin.command("shardCollection",
                                       "test.mcsharded",
                                       key={"i": 1})

        # Pre-split the collection so that:
        #   i < 1000 lives on shard1
        #   i >= 1000 lives on shard2
        self.mongos_conn.admin.command(bson.SON([
            ("split", "test.mcsharded"),
            ("middle", {"i": 1000})
        ]))

        # Disable the balancer
        self.mongos_conn.config.settings.update(
            {"_id": "balancer"},
            {"$set": {"stopped": True}},
            upsert=True
        )

        # Move chunks to their proper places
        try:
            self.mongos_conn["admin"].command(
                "moveChunk",
                "test.mcsharded",
                find={"i": 1},
                to="demo-set-0"
            )
        except pymongo.errors.OperationFailure:
            pass  # chunk may already be on the correct shard
        try:
            self.mongos_conn["admin"].command(
                "moveChunk",
                "test.mcsharded",
                find={"i": 1000},
                to="demo-set-1"
            )
        except pymongo.errors.OperationFailure:
            pass  # chunk may already be on the correct shard

        # Make sure chunks are distributed correctly
        self.mongos_conn["test"]["mcsharded"].insert({"i": 1})
        self.mongos_conn["test"]["mcsharded"].insert({"i": 1000})

        def chunks_moved():
            doc1 = self.shard1_conn.test.mcsharded.find_one()
            doc2 = self.shard2_conn.test.mcsharded.find_one()
            if None in (doc1, doc2):
                return False
            return doc1['i'] == 1 and doc2['i'] == 1000
        assert_soon(chunks_moved)
        self.mongos_conn.test.mcsharded.remove()
        # ... the rest of this code is omitted ...
Example 6: run
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
def run(self):
    """Discovers the mongo cluster and creates a thread for each primary.
    """
    main_conn = MongoClient(self.address)
    if self.auth_key is not None:
        main_conn['admin'].authenticate(self.auth_username, self.auth_key)
    self.read_oplog_progress()

    conn_type = None
    try:
        main_conn.admin.command("isdbgrid")
    except pymongo.errors.OperationFailure:
        conn_type = "REPLSET"

    if conn_type == "REPLSET":
        # Make sure we are connected to a replica set
        is_master = main_conn.admin.command("isMaster")
        if "setName" not in is_master:
            logging.error(
                'No replica set at "%s"! A replica set is required '
                'to run mongo-connector. Shutting down...' % self.address
            )
            return

        # Establish a connection to the replica set as a whole
        main_conn.disconnect()
        main_conn = MongoClient(self.address,
                                replicaSet=is_master['setName'])
        if self.auth_key is not None:
            main_conn.admin.authenticate(self.auth_username, self.auth_key)

        # Non-sharded configuration
        oplog_coll = main_conn['local']['oplog.rs']
        oplog = OplogThread(
            primary_conn=main_conn,
            main_address=self.address,
            oplog_coll=oplog_coll,
            is_sharded=False,
            doc_manager=self.doc_managers,
            oplog_progress_dict=self.oplog_progress,
            namespace_set=self.ns_set,
            auth_key=self.auth_key,
            auth_username=self.auth_username,
            repl_set=is_master['setName'],
            collection_dump=self.collection_dump,
            batch_size=self.batch_size,
            fields=self.fields,
            dest_mapping=self.dest_mapping,
            continue_on_error=self.continue_on_error
        )
        self.shard_set[0] = oplog
        logging.info('MongoConnector: Starting connection thread %s' %
                     main_conn)
        oplog.start()

        while self.can_run:
            if not self.shard_set[0].running:
                logging.error("MongoConnector: OplogThread"
                              " %s unexpectedly stopped! Shutting down" %
                              (str(self.shard_set[0])))
                self.oplog_thread_join()
                for dm in self.doc_managers:
                    dm.stop()
                return

            self.write_oplog_progress()
            time.sleep(1)

    else:  # sharded cluster
        while self.can_run is True:
            for shard_doc in main_conn['config']['shards'].find():
                shard_id = shard_doc['_id']
                if shard_id in self.shard_set:
                    if not self.shard_set[shard_id].running:
                        logging.error("MongoConnector: OplogThread "
                                      "%s unexpectedly stopped! Shutting "
                                      "down" %
                                      (str(self.shard_set[shard_id])))
                        self.oplog_thread_join()
                        for dm in self.doc_managers:
                            dm.stop()
                        return

                    self.write_oplog_progress()
                    time.sleep(1)
                    continue
                try:
                    repl_set, hosts = shard_doc['host'].split('/')
                except ValueError:
                    cause = "The system only uses replica sets!"
                    logging.error("MongoConnector: %s", cause)
                    self.oplog_thread_join()
                    for dm in self.doc_managers:
                        dm.stop()
                    return

                shard_conn = MongoClient(hosts, replicaSet=repl_set)
                oplog_coll = shard_conn['local']['oplog.rs']
                # ... the rest of this code is omitted ...
Example 7: TestRollbacks
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestRollbacks(unittest.TestCase):
    def tearDown(self):
        kill_all()

    def setUp(self):
        # Create a new oplog progress file
        try:
            os.unlink("config.txt")
        except OSError:
            pass
        open("config.txt", "w").close()

        # Start a replica set
        _, self.secondary_p, self.primary_p = start_replica_set('rollbacks')
        # Connection to the replica set as a whole
        self.main_conn = MongoClient('%s:%d' % (mongo_host, self.primary_p),
                                     replicaSet='rollbacks')
        # Connection to the primary specifically
        self.primary_conn = MongoClient('%s:%d' % (mongo_host, self.primary_p))
        # Connection to the secondary specifically
        self.secondary_conn = MongoClient(
            '%s:%d' % (mongo_host, self.secondary_p),
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )

        # Wipe any test data
        self.main_conn["test"]["mc"].drop()

        # Oplog thread
        doc_manager = DocManager()
        oplog_progress = LockingDict()
        self.opman = OplogThread(
            primary_conn=self.main_conn,
            main_address='%s:%d' % (mongo_host, self.primary_p),
            oplog_coll=self.main_conn["local"]["oplog.rs"],
            is_sharded=False,
            doc_manager=doc_manager,
            oplog_progress_dict=oplog_progress,
            namespace_set=["test.mc"],
            auth_key=None,
            auth_username=None,
            repl_set="rollbacks"
        )

    def test_single_target(self):
        """Test with a single replication target"""
        self.opman.start()

        # Insert first document with primary up
        self.main_conn["test"]["mc"].insert({"i": 0})
        self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)

        # Make sure the insert is replicated
        secondary = self.secondary_conn
        self.assertTrue(wait_for(lambda: secondary["test"]["mc"].count() == 1),
                        "first write didn't replicate to secondary")

        # Kill the primary
        kill_mongo_proc(self.primary_p, destroy=False)

        # Wait for the secondary to be promoted
        while not secondary["admin"].command("isMaster")["ismaster"]:
            time.sleep(1)

        # Insert another document. This will be rolled back later
        retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
        self.assertEqual(secondary["test"]["mc"].count(), 2)

        # Wait for replication to doc manager
        c = lambda: len(self.opman.doc_managers[0]._search()) == 2
        self.assertTrue(wait_for(c),
                        "not all writes were replicated to doc manager")

        # Kill the new primary
        kill_mongo_proc(self.secondary_p, destroy=False)

        # Start both servers back up
        restart_mongo_proc(self.primary_p)
        primary_admin = self.primary_conn["admin"]
        while not primary_admin.command("isMaster")["ismaster"]:
            time.sleep(1)
        restart_mongo_proc(self.secondary_p)
        while secondary["admin"].command("replSetGetStatus")["myState"] != 2:
            time.sleep(1)
        while retry_until_ok(self.main_conn["test"]["mc"].find().count) == 0:
            time.sleep(1)

        # Only first document should exist in MongoDB
        self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
        self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)

        # Same case should hold for the doc manager
        doc_manager = self.opman.doc_managers[0]
        self.assertEqual(len(doc_manager._search()), 1)
        self.assertEqual(doc_manager._search()[0]["i"], 0)

        # cleanup
        self.opman.join()
        # ... the rest of this code is omitted ...
Example 8: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        _, _, self.primary_p = start_replica_set("test-oplog-manager")
        self.primary_conn = pymongo.MongoClient(mongo_host, self.primary_p)
        self.oplog_coll = self.primary_conn.local["oplog.rs"]
        self.opman = OplogThread(
            primary_conn=self.primary_conn,
            main_address="%s:%d" % (mongo_host, self.primary_p),
            oplog_coll=self.oplog_coll,
            is_sharded=False,
            doc_manager=DocManager(),
            oplog_progress_dict=LockingDict(),
            namespace_set=None,
            auth_key=None,
            auth_username=None,
            repl_set="test-oplog-manager",
        )

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        self.primary_conn.close()
        kill_replica_set("test-oplog-manager")

    def test_get_oplog_cursor(self):
        """Test the get_oplog_cursor method"""
        # timestamp is None - all oplog entries are returned.
        cursor = self.opman.get_oplog_cursor(None)
        self.assertEqual(cursor.count(), self.primary_conn["local"]["oplog.rs"].count())

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.primary_conn["test"]["test"].insert(doc)
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = next(cursor)["o"]["_id"]
        retrieved = self.primary_conn.test.test.find_one(next_entry_id)
        self.assertEqual(retrieved, doc)

        # many entries before and after timestamp
        self.primary_conn["test"]["test"].insert({"i": i} for i in range(2, 1002))
        oplog_cursor = self.oplog_coll.find(sort=[("ts", pymongo.ASCENDING)])
        # startup + insert + 1000 inserts
        self.assertEqual(oplog_cursor.count(), 2 + 1000)
        pivot = oplog_cursor.skip(400).limit(1)[0]
        goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
        self.assertEqual(goc_cursor.count(), 2 + 1000 - 400)

    def test_get_last_oplog_timestamp(self):
        """Test the get_last_oplog_timestamp method"""
        # "empty" the oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), None)

        # Test non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert({"i": i + 500})
        oplog = self.primary_conn["local"]["oplog.rs"]
        oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(1)[0]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), oplog["ts"])

    def test_dump_collection(self):
        """Test the dump_collection method

        Cases:
        1. empty oplog
        2. non-empty oplog
        """
        # Test with empty oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        last_ts = self.opman.dump_collection()
        self.assertEqual(last_ts, None)

        # Test with non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert({"i": i + 500})
        last_ts = self.opman.get_last_oplog_timestamp()
        self.assertEqual(last_ts, self.opman.dump_collection())
        self.assertEqual(len(self.opman.doc_managers[0]._search()), 1000)

    def test_dump_collection_with_error(self):
        """Test the dump_collection method with invalid documents.

        Cases:
        # ... the rest of this code is omitted ...
Example 9: TestOplogManagerSharded
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestOplogManagerSharded(unittest.TestCase):
    """Defines all test cases for OplogThreads running on a sharded
    cluster
    """

    def setUp(self):
        """Initialize the cluster:

        Clean out the databases used by the tests
        Make connections to mongos, mongods
        Create and shard test collections
        Create OplogThreads
        """
        self.cluster = ShardedCluster().start()

        # Connection to mongos
        self.mongos_conn = self.cluster.client()

        # Connections to the shards
        self.shard1_conn = self.cluster.shards[0].client()
        self.shard2_conn = self.cluster.shards[1].client()
        self.shard1_secondary_conn = self.cluster.shards[0].secondary.client(
            read_preference=ReadPreference.SECONDARY_PREFERRED)
        self.shard2_secondary_conn = self.cluster.shards[1].secondary.client(
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )

        # Wipe any test data
        self.mongos_conn["test"]["mcsharded"].drop()

        # Create and shard the collection test.mcsharded on the "i" field
        self.mongos_conn["test"]["mcsharded"].create_index("i")
        self.mongos_conn.admin.command("enableSharding", "test")
        self.mongos_conn.admin.command("shardCollection",
                                       "test.mcsharded",
                                       key={"i": 1})

        # Pre-split the collection so that:
        #   i < 1000 lives on shard1
        #   i >= 1000 lives on shard2
        self.mongos_conn.admin.command(bson.SON([
            ("split", "test.mcsharded"),
            ("middle", {"i": 1000})
        ]))

        # Disable the balancer
        self.mongos_conn.config.settings.update_one(
            {"_id": "balancer"},
            {"$set": {"stopped": True}},
            upsert=True
        )

        # Move chunks to their proper places
        try:
            self.mongos_conn["admin"].command(
                "moveChunk",
                "test.mcsharded",
                find={"i": 1},
                to='demo-set-0'
            )
        except pymongo.errors.OperationFailure:
            pass
        try:
            self.mongos_conn["admin"].command(
                "moveChunk",
                "test.mcsharded",
                find={"i": 1000},
                to='demo-set-1'
            )
        except pymongo.errors.OperationFailure:
            pass

        # Make sure chunks are distributed correctly
        self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1})
        self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1000})

        def chunks_moved():
            doc1 = self.shard1_conn.test.mcsharded.find_one()
            doc2 = self.shard2_conn.test.mcsharded.find_one()
            if None in (doc1, doc2):
                return False
            return doc1['i'] == 1 and doc2['i'] == 1000
        assert_soon(chunks_moved, max_tries=120,
                    message='chunks not moved? doc1=%r, doc2=%r' % (
                        self.shard1_conn.test.mcsharded.find_one(),
                        self.shard2_conn.test.mcsharded.find_one()))
        self.mongos_conn.test.mcsharded.delete_many({})

        # Create a new oplog progress file
        try:
            os.unlink("oplog.timestamp")
        except OSError:
            pass
        open("oplog.timestamp", "w").close()

        # Oplog threads (oplog manager) for each shard
        doc_manager = DocManager()
        oplog_progress = LockingDict()
        self.opman1 = OplogThread(
            primary_client=self.shard1_conn,
            # ... the rest of this code is omitted ...
Example 10: run
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
def run(self):
    """Discovers the mongo cluster and creates a thread for each primary.
    """
    self.main_conn = self.create_authed_client()
    LOG.always('Source MongoDB version: %s',
               self.main_conn.admin.command('buildInfo')['version'])
    for dm in self.doc_managers:
        name = dm.__class__.__module__
        module = sys.modules[name]
        version = 'unknown'
        if hasattr(module, '__version__'):
            version = module.__version__
        elif hasattr(module, 'version'):
            version = module.version
        LOG.always('Target DocManager: %s version: %s', name, version)

    self.read_oplog_progress()

    conn_type = None
    try:
        self.main_conn.admin.command("isdbgrid")
    except pymongo.errors.OperationFailure:
        conn_type = "REPLSET"

    if conn_type == "REPLSET":
        # Make sure we are connected to a replica set
        is_master = self.main_conn.admin.command("isMaster")
        if "setName" not in is_master:
            LOG.error(
                'No replica set at "%s"! A replica set is required '
                'to run mongo-connector. Shutting down...' % self.address
            )
            return

        # Establish a connection to the replica set as a whole
        self.main_conn.close()
        self.main_conn = self.create_authed_client(
            replicaSet=is_master['setName'])

        # Non-sharded configuration
        oplog = OplogThread(
            self.main_conn, self.doc_managers, self.oplog_progress,
            self.namespace_config, **self.kwargs)
        self.shard_set[0] = oplog
        LOG.info('MongoConnector: Starting connection thread %s' %
                 self.main_conn)
        oplog.start()

        while self.can_run:
            shard_thread = self.shard_set[0]
            if not (shard_thread.running and shard_thread.is_alive()):
                LOG.error("MongoConnector: OplogThread"
                          " %s unexpectedly stopped! Shutting down" %
                          (str(self.shard_set[0])))
                self.oplog_thread_join()
                for dm in self.doc_managers:
                    dm.stop()
                return

            self.write_oplog_progress()
            time.sleep(1)

    else:  # sharded cluster
        while self.can_run:
            for shard_doc in retry_until_ok(self.main_conn.admin.command,
                                            'listShards')['shards']:
                shard_id = shard_doc['_id']
                if shard_id in self.shard_set:
                    shard_thread = self.shard_set[shard_id]
                    if not (shard_thread.running and shard_thread.is_alive()):
                        LOG.error("MongoConnector: OplogThread "
                                  "%s unexpectedly stopped! Shutting "
                                  "down" %
                                  (str(self.shard_set[shard_id])))
                        self.oplog_thread_join()
                        for dm in self.doc_managers:
                            dm.stop()
                        return

                    self.write_oplog_progress()
                    time.sleep(1)
                    continue
                try:
                    repl_set, hosts = shard_doc['host'].split('/')
                except ValueError:
                    cause = "The system only uses replica sets!"
                    LOG.exception("MongoConnector: %s", cause)
                    self.oplog_thread_join()
                    for dm in self.doc_managers:
                        dm.stop()
                    return

                shard_conn = self.create_authed_client(
                    hosts, replicaSet=repl_set)
                oplog = OplogThread(
                    shard_conn, self.doc_managers, self.oplog_progress,
                    self.namespace_config, mongos_client=self.main_conn,
                    **self.kwargs)
                # ... the rest of this code is omitted ...
Example 11: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        self.repl_set = ReplicaSetSingle().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_coll = self.primary_conn.local["oplog.rs"]
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(DocManager(),),
            oplog_progress_dict=LockingDict(),
            namespace_config=NamespaceConfig(
                namespace_options={"test.*": True, "gridfs.*": {"gridfs": True}}
            ),
        )

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        self.primary_conn.drop_database("test")
        close_client(self.primary_conn)
        self.repl_set.stop()

    def test_get_oplog_cursor(self):
        """Test the get_oplog_cursor method"""
        # timestamp is None - all oplog entries excluding no-ops are returned.
        cursor = self.opman.get_oplog_cursor(None)
        self.assertEqual(
            cursor.count(),
            self.primary_conn["local"]["oplog.rs"].find({"op": {"$ne": "n"}}).count(),
        )

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.primary_conn["test"]["test"].insert_one(doc)
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = next(cursor)["o"]["_id"]
        retrieved = self.primary_conn.test.test.find_one(next_entry_id)
        self.assertEqual(retrieved, doc)

        # many entries before and after timestamp
        self.primary_conn["test"]["test"].insert_many(
            [{"i": i} for i in range(2, 1002)]
        )
        oplog_cursor = self.oplog_coll.find(
            {"op": {"$ne": "n"}, "ns": {"$not": re.compile(r"\.(system|\$cmd)")}},
            sort=[("ts", pymongo.ASCENDING)],
        )
        # initial insert + 1000 more inserts
        self.assertEqual(oplog_cursor.count(), 1 + 1000)
        pivot = oplog_cursor.skip(400).limit(-1)[0]
        goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
        self.assertEqual(goc_cursor.count(), 1 + 1000 - 400)

    def test_get_last_oplog_timestamp(self):
        """Test the get_last_oplog_timestamp method"""
        # "empty" the oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), None)

        # Test non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert_one({"i": i + 500})
        oplog = self.primary_conn["local"]["oplog.rs"]
        oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), oplog["ts"])

    def test_dump_collection(self):
        """Test the dump_collection method

        Cases:
        1. empty oplog
        2. non-empty oplog, with gridfs collections
        3. non-empty oplog, specified a namespace-set, none of the oplog
           entries are for collections in the namespace-set
        """
        # Test with empty oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        last_ts = self.opman.dump_collection()
        self.assertEqual(last_ts, None)

        # Test with non-empty oplog with gridfs collections
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        # Insert 10 gridfs files
        for i in range(10):
            fs = gridfs.GridFS(self.primary_conn["gridfs"], collection="test" + str(i))
            # ... the rest of this code is omitted ...
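Several of the cursor tests above filter oplog entries on the op field. For reference, each replica-set oplog entry is a document whose op code identifies the operation: "i" for insert, "u" for update, "d" for delete, "c" for command, and "n" for no-op; ns holds the namespace and o the operation document. A made-up insert entry, for illustration only:

import bson

# Illustrative shape of an oplog insert entry (field values invented).
entry = {
    "ts": bson.Timestamp(1424556741, 1),  # time of the operation
    "op": "i",                            # "i"=insert, "u"=update, "d"=delete,
                                          # "c"=command, "n"=no-op
    "ns": "test.test",                    # database.collection
    "o": {"_id": 1, "i": 1},              # the inserted document
}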
Example 12: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        _, _, self.primary_p = start_replica_set('test-oplog-manager')
        self.primary_conn = pymongo.MongoClient(mongo_host, self.primary_p)
        self.oplog_coll = self.primary_conn.local['oplog.rs']
        self.opman = OplogThread(
            primary_conn=self.primary_conn,
            main_address='%s:%d' % (mongo_host, self.primary_p),
            oplog_coll=self.oplog_coll,
            is_sharded=False,
            doc_manager=DocManager(),
            oplog_progress_dict=LockingDict(),
            namespace_set=None,
            auth_key=None,
            auth_username=None,
            repl_set='test-oplog-manager'
        )

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        self.primary_conn.close()
        kill_replica_set('test-oplog-manager')

    def test_retrieve_doc(self):
        """Test the retrieve_doc method"""
        # Trivial case where the oplog entry is None
        self.assertEqual(self.opman.retrieve_doc(None), None)

        # Retrieve a document from insert operation in oplog
        doc = {"name": "mango", "type": "fruit",
               "ns": "test.test", "weight": 3.24, "i": 1}
        self.primary_conn["test"]["test"].insert(doc)
        oplog_entries = self.primary_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.DESCENDING)],
            limit=1
        )
        oplog_entry = next(oplog_entries)
        self.assertEqual(self.opman.retrieve_doc(oplog_entry), doc)

        # Retrieve a document from update operation in oplog
        self.primary_conn["test"]["test"].update(
            {"i": 1},
            {"$set": {"sounds-like": "mongo"}}
        )
        oplog_entries = self.primary_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.DESCENDING)],
            limit=1
        )
        doc["sounds-like"] = "mongo"
        self.assertEqual(self.opman.retrieve_doc(next(oplog_entries)), doc)

        # Retrieve a document from remove operation in oplog
        # (expected: None)
        self.primary_conn["test"]["test"].remove({
            "i": 1
        })
        oplog_entries = self.primary_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.DESCENDING)],
            limit=1
        )
        self.assertEqual(self.opman.retrieve_doc(next(oplog_entries)), None)

        # Retrieve a document with bad _id
        # (expected: None)
        oplog_entry["o"]["_id"] = "ThisIsNotAnId123456789"
        self.assertEqual(self.opman.retrieve_doc(oplog_entry), None)

    def test_get_oplog_cursor(self):
        '''Test the get_oplog_cursor method'''
        # Trivial case: timestamp is None
        self.assertEqual(self.opman.get_oplog_cursor(None), None)

        # earliest entry is after given timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.primary_conn["test"]["test"].insert(doc)
        self.assertEqual(self.opman.get_oplog_cursor(
            bson.Timestamp(1, 0)), None)

        # earliest entry is the only one at/after timestamp
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        self.assertEqual(self.opman.retrieve_doc(next(cursor)), doc)

        # many entries before and after timestamp
        self.primary_conn["test"]["test"].insert(
            {"i": i} for i in range(2, 1002))
        oplog_cursor = self.oplog_coll.find(
            sort=[("ts", pymongo.ASCENDING)]
        )
        # ... the rest of this code is omitted ...
Example 13: TestCommandReplication
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestCommandReplication(unittest.TestCase):
    def setUp(self):
        self.repl_set = ReplicaSetSingle().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_progress = LockingDict()
        self.opman = None

    def tearDown(self):
        try:
            if self.opman:
                self.opman.join()
        except RuntimeError:
            pass
        close_client(self.primary_conn)
        self.repl_set.stop()

    def initOplogThread(self, namespace_set=None):
        self.docman = CommandLoggerDocManager()
        namespace_config = NamespaceConfig(namespace_set=namespace_set)
        self.docman.command_helper = CommandHelper(namespace_config)
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(self.docman,),
            oplog_progress_dict=self.oplog_progress,
            namespace_config=namespace_config,
            collection_dump=False,
        )
        self.opman.start()

    def test_command_helper(self):
        mapping = {"a.x": "b.x", "a.y": "c.y"}
        helper = CommandHelper(
            NamespaceConfig(
                namespace_set=list(mapping) + ["a.z"], namespace_options=mapping
            )
        )
        self.assertEqual(set(helper.map_db("a")), set(["a", "b", "c"]))
        self.assertEqual(helper.map_db("d"), [])
        self.assertEqual(helper.map_namespace("a.x"), "b.x")
        self.assertEqual(helper.map_namespace("a.z"), "a.z")
        self.assertEqual(helper.map_namespace("d.x"), None)
        self.assertEqual(helper.map_collection("a", "x"), ("b", "x"))
        self.assertEqual(helper.map_collection("a", "z"), ("a", "z"))
        self.assertEqual(helper.map_collection("d", "x"), (None, None))

    def test_create_collection(self):
        self.initOplogThread()
        pymongo.collection.Collection(self.primary_conn["test"], "test", create=True)
        assert_soon(lambda: self.docman.commands)
        command = self.docman.commands[0]
        self.assertEqual(command["create"], "test")

    def test_create_collection_skipped(self):
        self.initOplogThread(["test.test"])
        pymongo.collection.Collection(self.primary_conn["test2"], "test2", create=True)
        pymongo.collection.Collection(self.primary_conn["test"], "test", create=True)
        assert_soon(lambda: self.docman.commands)
        self.assertEqual(len(self.docman.commands), 1)
        command = self.docman.commands[0]
        self.assertEqual(command["create"], "test")

    def test_drop_collection(self):
        self.initOplogThread()
        coll = pymongo.collection.Collection(
            self.primary_conn["test"], "test", create=True
        )
        coll.drop()
        assert_soon(lambda: len(self.docman.commands) == 2)
        self.assertEqual(self.docman.commands[1], {"drop": "test"})

    def test_drop_database(self):
        self.initOplogThread()
        pymongo.collection.Collection(self.primary_conn["test"], "test", create=True)
        self.primary_conn.drop_database("test")
        assert_soon(lambda: len(self.docman.commands) == 2)
        self.assertEqual(self.docman.commands[1], {"dropDatabase": 1})

    def test_rename_collection(self):
        self.initOplogThread()
        coll = pymongo.collection.Collection(
            self.primary_conn["test"], "test", create=True
        )
        coll.rename("test2")
        assert_soon(lambda: len(self.docman.commands) == 2)
        self.assertEqual(self.docman.commands[1].get("renameCollection"), "test.test")
        self.assertEqual(self.docman.commands[1].get("to"), "test.test2")
Example 14: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        self.repl_set = ReplicaSetSingle().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_coll = self.primary_conn.local['oplog.rs']

    def reset_opman(self, include_ns=None, exclude_ns=None, dest_mapping=None):
        if include_ns is None:
            include_ns = []
        if exclude_ns is None:
            exclude_ns = []
        if dest_mapping is None:
            dest_mapping = {}

        # include_ns must not exist together with exclude_ns;
        # dest_mapping must exist together with include_ns.
        # Those checks are covered in test_config.py, so we skip them here.
        self.dest_mapping_stru = DestMapping(include_ns, exclude_ns,
                                             dest_mapping)
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(DocManager(),),
            oplog_progress_dict=LockingDict(),
            dest_mapping_stru=self.dest_mapping_stru,
            ns_set=include_ns,
            ex_ns_set=exclude_ns
        )

    def init_dbs(self):
        # includedb1.* & includedb2.includecol1 are the collections of interest
        self.primary_conn["includedb1"]["includecol1"].insert_many(
            [{"idb1col1": i} for i in range(1, 3)])
        self.primary_conn["includedb1"]["includecol2"].insert_many(
            [{"idb1col2": i} for i in range(1, 3)])
        self.primary_conn["includedb2"]["includecol1"].insert_many(
            [{"idb2col1": i} for i in range(1, 3)])
        # the other collections are not of interest
        self.primary_conn["includedb2"]["excludecol2"].insert_many(
            [{"idb2col2": i} for i in range(1, 3)])
        self.primary_conn["excludedb3"]["excludecol1"].insert_many(
            [{"idb3col1": i} for i in range(1, 3)])

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        for db in self.primary_conn.database_names():
            if db != "local":
                self.primary_conn.drop_database(db)
        close_client(self.primary_conn)
        self.repl_set.stop()

    def test_get_oplog_cursor(self):
        '''Test the get_oplog_cursor method'''
        # Put something in the dbs
        self.init_dbs()

        # timestamp is None - all oplog entries excluding no-ops are returned.
        # A wildcard include set does not affect the result.
        self.reset_opman(["includedb1.*", "includedb2.includecol1"], [], {})
        got_cursor = self.opman.get_oplog_cursor(None)
        oplog_cursor = self.oplog_coll.find(
            {'op': {'$ne': 'n'}})
        self.assertNotEqual(got_cursor, None)
        self.assertEqual(got_cursor.count(), oplog_cursor.count())

        # A wildcard exclude set does not affect the result either.
        self.reset_opman([], ["includedb2.excludecol2", "excludedb3.*"], {})
        got_cursor = self.opman.get_oplog_cursor(None)
        oplog_cursor = self.oplog_coll.find(
            {'op': {'$ne': 'n'}})
        self.assertNotEqual(got_cursor, None)
        self.assertEqual(got_cursor.count(), oplog_cursor.count())

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "idb1col1": 1}
        self.primary_conn["includedb1"]["includecol1"].insert_one(doc)
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = next(cursor)['o']['_id']
        retrieved = self.primary_conn.includedb1.includecol1.find_one(
            next_entry_id)
        self.assertEqual(retrieved, doc)
        # ... the rest of this code is omitted ...
Example 15: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import start [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        self.repl_set = ReplicaSet().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_coll = self.primary_conn.local['oplog.rs']
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(DocManager(),),
            oplog_progress_dict=LockingDict()
        )

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        self.primary_conn.drop_database("test")
        close_client(self.primary_conn)
        self.repl_set.stop()

    def test_get_oplog_cursor(self):
        '''Test the get_oplog_cursor method'''
        # timestamp is None - all oplog entries are returned.
        cursor = self.opman.get_oplog_cursor(None)
        self.assertEqual(cursor.count(),
                         self.primary_conn["local"]["oplog.rs"].count())

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.primary_conn["test"]["test"].insert_one(doc)
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = next(cursor)['o']['_id']
        retrieved = self.primary_conn.test.test.find_one(next_entry_id)
        self.assertEqual(retrieved, doc)

        # many entries before and after timestamp
        self.primary_conn["test"]["test"].insert_many(
            [{"i": i} for i in range(2, 1002)])
        oplog_cursor = self.oplog_coll.find(
            {'op': {'$ne': 'n'},
             'ns': {'$not': re.compile(r'\.(system|\$cmd)')}},
            sort=[("ts", pymongo.ASCENDING)]
        )
        # initial insert + 1000 more inserts
        self.assertEqual(oplog_cursor.count(), 1 + 1000)
        pivot = oplog_cursor.skip(400).limit(-1)[0]
        goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
        self.assertEqual(goc_cursor.count(), 1 + 1000 - 400)

    def test_get_last_oplog_timestamp(self):
        """Test the get_last_oplog_timestamp method"""
        # "empty" the oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), None)

        # Test non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert_one({
                "i": i + 500
            })
        oplog = self.primary_conn["local"]["oplog.rs"]
        oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
        self.assertEqual(self.opman.get_last_oplog_timestamp(),
                         oplog["ts"])

    def test_dump_collection(self):
        """Test the dump_collection method

        Cases:
        1. empty oplog
        2. non-empty oplog
        """
        # Test with empty oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        last_ts = self.opman.dump_collection()
        self.assertEqual(last_ts, None)

        # Test with non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert_one({
                "i": i + 500
            })
        last_ts = self.opman.get_last_oplog_timestamp()
        self.assertEqual(last_ts, self.opman.dump_collection())
        self.assertEqual(len(self.opman.doc_managers[0]._search()), 1000)
        # ... the rest of this code is omitted ...