This page collects typical usage examples of the Python method mongo_connector.oplog_manager.OplogThread.get_last_oplog_timestamp. If you are unsure what OplogThread.get_last_oplog_timestamp does or how to call it, the curated examples below should help. You can also read more about the containing class, mongo_connector.oplog_manager.OplogThread.
Below are 10 code examples of the OplogThread.get_last_oplog_timestamp method, ordered by popularity by default.
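Before the test-suite examples, a minimal sketch of how the method is usually called may help orient you. It assumes a reachable replica set and uses the newer OplogThread constructor that appears in Examples 1, 2 and 7; the connection URI, replica-set name, and namespace are placeholders, and import paths can differ between mongo-connector versions.

# Minimal sketch (not taken from the examples below): build an OplogThread and
# read the newest oplog timestamp. URI, replica-set name, and namespace are
# placeholders for illustration only.
import pymongo
from mongo_connector.locking_dict import LockingDict
from mongo_connector.namespace_config import NamespaceConfig
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.oplog_manager import OplogThread

client = pymongo.MongoClient("localhost:27017", replicaSet="rs0")  # placeholder
opman = OplogThread(
    primary_client=client,
    doc_managers=(DocManager(),),
    oplog_progress_dict=LockingDict(),
    namespace_config=NamespaceConfig(namespace_set=["test.test"]),
)

# Newest entry's ts (a bson.Timestamp) in local.oplog.rs, or None if the
# oplog collection is empty.
last_ts = opman.get_last_oplog_timestamp()

# As several tests below verify, this matches a reverse-natural-order query
# (assuming no write lands between the two reads):
newest = client.local["oplog.rs"].find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
assert last_ts == newest["ts"]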
Example 1: test_skipped_oplog_entry_updates_checkpoint
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
def test_skipped_oplog_entry_updates_checkpoint(self):
    repl_set = ReplicaSetSingle().start()
    conn = repl_set.client()
    opman = OplogThread(
        primary_client=conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        namespace_config=NamespaceConfig(namespace_set=["test.test"]),
    )
    opman.start()

    # Insert a document into an included collection
    conn["test"]["test"].insert_one({"test": 1})
    last_ts = opman.get_last_oplog_timestamp()
    assert_soon(
        lambda: last_ts == opman.checkpoint,
        "OplogThread never updated checkpoint to non-skipped entry.",
    )
    self.assertEqual(len(opman.doc_managers[0]._search()), 1)

    # Make sure that the oplog thread updates its checkpoint on every
    # oplog entry.
    conn["test"]["ignored"].insert_one({"test": 1})
    last_ts = opman.get_last_oplog_timestamp()
    assert_soon(
        lambda: last_ts == opman.checkpoint,
        "OplogThread never updated checkpoint to skipped entry.",
    )

    opman.join()
    conn.close()
    repl_set.stop()
Example 2: test_dump_collection
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
def test_dump_collection(self):
    """Test the dump_collection method

    Cases:
    1. empty oplog
    2. non-empty oplog, with gridfs collections
    3. non-empty oplog, specified a namespace-set, none of the oplog
       entries are for collections in the namespace-set
    """
    # Test with empty oplog
    self.opman.oplog = self.primary_conn["test"]["emptycollection"]
    last_ts = self.opman.dump_collection()
    self.assertEqual(last_ts, None)

    # Test with non-empty oplog with gridfs collections
    self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
    # Insert 10 gridfs files
    for i in range(10):
        fs = gridfs.GridFS(self.primary_conn["gridfs"],
                           collection="test" + str(i))
        fs.put(b"hello world")
    # Insert 1000 documents
    for i in range(1000):
        self.primary_conn["test"]["test"].insert_one({
            "i": i + 500
        })
    last_ts = self.opman.get_last_oplog_timestamp()
    self.assertEqual(last_ts, self.opman.dump_collection())
    self.assertEqual(len(self.opman.doc_managers[0]._search()), 1010)

    # Case 3
    # 1MB oplog so that we can rollover quickly
    repl_set = ReplicaSetSingle(oplogSize=1).start()
    conn = repl_set.client()
    opman = OplogThread(
        primary_client=conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        namespace_config=NamespaceConfig(namespace_set=["test.test"]),
    )
    # Insert a document into an included collection
    conn["test"]["test"].insert_one({"test": 1})
    # Cause the oplog to rollover on a non-included collection
    while conn["local"]["oplog.rs"].find_one({"ns": "test.test"}):
        conn["test"]["ignored"].insert_many(
            [{"test": "1" * 1024} for _ in range(1024)])
    last_ts = opman.get_last_oplog_timestamp()
    self.assertEqual(last_ts, opman.dump_collection())
    self.assertEqual(len(opman.doc_managers[0]._search()), 1)
    conn.close()
    repl_set.stop()
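The key check in Example 2 is assertEqual(last_ts, opman.dump_collection()): dump_collection is expected to hand back the same timestamp that get_last_oplog_timestamp reported just before the dump, as long as nothing new is written in between. A small sketch of that contract, reusing an opman like the one built above (a continuation for illustration, not a standalone script):

# Continuation of Example 2's setup; assumes `opman` is an already-constructed
# OplogThread and the primary receives no writes between the two calls.
before = opman.get_last_oplog_timestamp()  # newest ts in local.oplog.rs, or None if empty
dumped_up_to = opman.dump_collection()     # dumps the configured namespaces, returns a timestamp
assert dumped_up_to == before              # holds only while no new oplog entries arrive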
Example 3: test_dump_collection
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
def test_dump_collection(self):
    """Test the dump_collection method

    Cases:
    1. empty oplog
    2. non-empty oplog
    3. non-empty oplog, specified a namespace-set, none of the oplog
       entries are for collections in the namespace-set
    """
    # Test with empty oplog
    self.opman.oplog = self.primary_conn["test"]["emptycollection"]
    last_ts = self.opman.dump_collection()
    self.assertEqual(last_ts, None)

    # Test with non-empty oplog
    self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
    for i in range(1000):
        self.primary_conn["test"]["test"].insert_one({
            "i": i + 500
        })
    last_ts = self.opman.get_last_oplog_timestamp()
    self.assertEqual(last_ts, self.opman.dump_collection())
    self.assertEqual(len(self.opman.doc_managers[0]._search()), 1000)

    # Case 3
    # 1MB oplog so that we can rollover quickly
    repl_set = ReplicaSetSingle(oplogSize=1).start()
    conn = repl_set.client()
    dest_mapping_stru = DestMapping(["test.test"], [], {})
    opman = OplogThread(
        primary_client=conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        dest_mapping_stru=dest_mapping_stru,
        ns_set=set(["test.test"])
    )
    # Insert a document into a ns_set collection
    conn["test"]["test"].insert_one({"test": 1})
    # Cause the oplog to rollover on a non-ns_set collection
    while conn["local"]["oplog.rs"].find_one({"ns": "test.test"}):
        conn["test"]["ignored"].insert_many(
            [{"test": "1" * 1024} for _ in range(1024)])
    last_ts = opman.get_last_oplog_timestamp()
    self.assertEqual(last_ts, opman.dump_collection())
    self.assertEqual(len(opman.doc_managers[0]._search()), 1)
    conn.close()
    repl_set.stop()
Example 4: TestOplogManagerSharded
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
#......... some code omitted .........
        )
        doc["sounds-like"] = "mongo"
        self.assertEqual(self.opman1.retrieve_doc(next(oplog_entries)), doc)

        # Retrieve a document from remove operation in oplog
        # (expected: None)
        self.mongos_conn["test"]["mcsharded"].remove({
            "i": 1
        })
        oplog_entries = self.shard1_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.DESCENDING)],
            limit=1
        )
        self.assertEqual(self.opman1.retrieve_doc(next(oplog_entries)), None)

        # Retrieve a document with bad _id
        # (expected: None)
        oplog_entry["o"]["_id"] = "ThisIsNotAnId123456789"
        self.assertEqual(self.opman1.retrieve_doc(oplog_entry), None)

    def test_get_oplog_cursor(self):
        """Test the get_oplog_cursor method"""
        # Trivial case: timestamp is None
        self.assertEqual(self.opman1.get_oplog_cursor(None), None)

        # earliest entry is after given timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.mongos_conn["test"]["mcsharded"].insert(doc)
        self.assertEqual(self.opman1.get_oplog_cursor(
            bson.Timestamp(1, 0)), None)

        # earliest entry is the only one at/after timestamp
        latest_timestamp = self.opman1.get_last_oplog_timestamp()
        cursor = self.opman1.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        self.assertEqual(self.opman1.retrieve_doc(cursor[0]), doc)

        # many entries before and after timestamp
        for i in range(2, 2002):
            self.mongos_conn["test"]["mcsharded"].insert({
                "i": i
            })
        oplog1 = self.shard1_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.ASCENDING)]
        )
        oplog2 = self.shard2_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.ASCENDING)]
        )
        # oplogs should have records for inserts performed, plus
        # various other messages
        oplog1_count = oplog1.count()
        oplog2_count = oplog2.count()
        self.assertGreaterEqual(oplog1_count, 998)
        self.assertGreaterEqual(oplog2_count, 1002)
        pivot1 = oplog1.skip(400).limit(1)[0]
        pivot2 = oplog2.skip(400).limit(1)[0]
        cursor1 = self.opman1.get_oplog_cursor(pivot1["ts"])
        cursor2 = self.opman2.get_oplog_cursor(pivot2["ts"])
        self.assertEqual(cursor1.count(), oplog1_count - 400)
        self.assertEqual(cursor2.count(), oplog2_count - 400)
        # get_oplog_cursor fast-forwards *one doc beyond* the given timestamp
Example 5: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        _, _, self.primary_p = start_replica_set("test-oplog-manager")
        self.primary_conn = pymongo.MongoClient(mongo_host, self.primary_p)
        self.oplog_coll = self.primary_conn.local["oplog.rs"]
        self.opman = OplogThread(
            primary_conn=self.primary_conn,
            main_address="%s:%d" % (mongo_host, self.primary_p),
            oplog_coll=self.oplog_coll,
            is_sharded=False,
            doc_manager=DocManager(),
            oplog_progress_dict=LockingDict(),
            namespace_set=None,
            auth_key=None,
            auth_username=None,
            repl_set="test-oplog-manager",
        )

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        self.primary_conn.close()
        kill_replica_set("test-oplog-manager")

    def test_get_oplog_cursor(self):
        """Test the get_oplog_cursor method"""
        # timestamp is None - all oplog entries are returned.
        cursor = self.opman.get_oplog_cursor(None)
        self.assertEqual(cursor.count(),
                         self.primary_conn["local"]["oplog.rs"].count())

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.primary_conn["test"]["test"].insert(doc)
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = next(cursor)["o"]["_id"]
        retrieved = self.primary_conn.test.test.find_one(next_entry_id)
        self.assertEqual(retrieved, doc)

        # many entries before and after timestamp
        self.primary_conn["test"]["test"].insert({"i": i} for i in range(2, 1002))
        oplog_cursor = self.oplog_coll.find(sort=[("ts", pymongo.ASCENDING)])
        # startup + insert + 1000 inserts
        self.assertEqual(oplog_cursor.count(), 2 + 1000)
        pivot = oplog_cursor.skip(400).limit(1)[0]
        goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
        self.assertEqual(goc_cursor.count(), 2 + 1000 - 400)

    def test_get_last_oplog_timestamp(self):
        """Test the get_last_oplog_timestamp method"""
        # "empty" the oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), None)

        # Test non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert({"i": i + 500})
        oplog = self.primary_conn["local"]["oplog.rs"]
        oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(1)[0]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), oplog["ts"])

    def test_dump_collection(self):
        """Test the dump_collection method

        Cases:
        1. empty oplog
        2. non-empty oplog
        """
        # Test with empty oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        last_ts = self.opman.dump_collection()
        self.assertEqual(last_ts, None)

        # Test with non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert({"i": i + 500})
        last_ts = self.opman.get_last_oplog_timestamp()
        self.assertEqual(last_ts, self.opman.dump_collection())
        self.assertEqual(len(self.opman.doc_managers[0]._search()), 1000)

    def test_dump_collection_with_error(self):
        """Test the dump_collection method with invalid documents.

        Cases:
#......... some code omitted .........
Example 6: TestOplogManagerSharded
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
#......... some code omitted .........
            self.opman1.join()
        except RuntimeError:
            pass  # thread may not have been started
        try:
            self.opman2.join()
        except RuntimeError:
            pass  # thread may not have been started
        close_client(self.mongos_conn)
        close_client(self.shard1_conn)
        close_client(self.shard2_conn)
        close_client(self.shard1_secondary_conn)
        close_client(self.shard2_secondary_conn)
        self.cluster.stop()

    def test_get_oplog_cursor(self):
        """Test the get_oplog_cursor method"""
        # timestamp = None
        cursor1 = self.opman1.get_oplog_cursor(None)
        oplog1 = self.shard1_conn["local"]["oplog.rs"].find(
            {'op': {'$ne': 'n'},
             'ns': {'$not': re.compile(r'\.system')}})
        self.assertEqual(list(cursor1), list(oplog1))

        cursor2 = self.opman2.get_oplog_cursor(None)
        oplog2 = self.shard2_conn["local"]["oplog.rs"].find(
            {'op': {'$ne': 'n'},
             'ns': {'$not': re.compile(r'\.system')}})
        self.assertEqual(list(cursor2), list(oplog2))

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.mongos_conn["test"]["mcsharded"].insert_one(doc)
        latest_timestamp = self.opman1.get_last_oplog_timestamp()
        cursor = self.opman1.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = cursor[0]['o']['_id']
        retrieved = self.mongos_conn.test.mcsharded.find_one(next_entry_id)
        self.assertEqual(retrieved, doc)

        # many entries before and after timestamp
        for i in range(2, 2002):
            self.mongos_conn["test"]["mcsharded"].insert_one({
                "i": i
            })
        oplog1 = self.shard1_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.ASCENDING)]
        )
        oplog2 = self.shard2_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.ASCENDING)]
        )
        # oplogs should have records for inserts performed, plus
        # various other messages
        oplog1_count = oplog1.count()
        oplog2_count = oplog2.count()
        self.assertGreaterEqual(oplog1_count, 998)
        self.assertGreaterEqual(oplog2_count, 1002)
        pivot1 = oplog1.skip(400).limit(-1)[0]
        pivot2 = oplog2.skip(400).limit(-1)[0]
        cursor1 = self.opman1.get_oplog_cursor(pivot1["ts"])
        cursor2 = self.opman2.get_oplog_cursor(pivot2["ts"])
        self.assertEqual(cursor1.count(), oplog1_count - 400)
        self.assertEqual(cursor2.count(), oplog2_count - 400)
Example 7: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        self.repl_set = ReplicaSetSingle().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_coll = self.primary_conn.local["oplog.rs"]
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(DocManager(),),
            oplog_progress_dict=LockingDict(),
            namespace_config=NamespaceConfig(
                namespace_options={"test.*": True, "gridfs.*": {"gridfs": True}}
            ),
        )

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        self.primary_conn.drop_database("test")
        close_client(self.primary_conn)
        self.repl_set.stop()

    def test_get_oplog_cursor(self):
        """Test the get_oplog_cursor method"""
        # timestamp is None - all oplog entries excluding no-ops are returned.
        cursor = self.opman.get_oplog_cursor(None)
        self.assertEqual(
            cursor.count(),
            self.primary_conn["local"]["oplog.rs"].find({"op": {"$ne": "n"}}).count(),
        )

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.primary_conn["test"]["test"].insert_one(doc)
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = next(cursor)["o"]["_id"]
        retrieved = self.primary_conn.test.test.find_one(next_entry_id)
        self.assertEqual(retrieved, doc)

        # many entries before and after timestamp
        self.primary_conn["test"]["test"].insert_many(
            [{"i": i} for i in range(2, 1002)]
        )
        oplog_cursor = self.oplog_coll.find(
            {"op": {"$ne": "n"}, "ns": {"$not": re.compile(r"\.(system|\$cmd)")}},
            sort=[("ts", pymongo.ASCENDING)],
        )
        # initial insert + 1000 more inserts
        self.assertEqual(oplog_cursor.count(), 1 + 1000)
        pivot = oplog_cursor.skip(400).limit(-1)[0]
        goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
        self.assertEqual(goc_cursor.count(), 1 + 1000 - 400)

    def test_get_last_oplog_timestamp(self):
        """Test the get_last_oplog_timestamp method"""
        # "empty" the oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), None)

        # Test non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert_one({"i": i + 500})
        oplog = self.primary_conn["local"]["oplog.rs"]
        oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), oplog["ts"])

    def test_dump_collection(self):
        """Test the dump_collection method

        Cases:
        1. empty oplog
        2. non-empty oplog, with gridfs collections
        3. non-empty oplog, specified a namespace-set, none of the oplog
           entries are for collections in the namespace-set
        """
        # Test with empty oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        last_ts = self.opman.dump_collection()
        self.assertEqual(last_ts, None)

        # Test with non-empty oplog with gridfs collections
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        # Insert 10 gridfs files
        for i in range(10):
            fs = gridfs.GridFS(self.primary_conn["gridfs"], collection="test" + str(i))
#......... some code omitted .........
Example 8: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        _, _, self.primary_p = start_replica_set('test-oplog-manager')
        self.primary_conn = pymongo.MongoClient(mongo_host, self.primary_p)
        self.oplog_coll = self.primary_conn.local['oplog.rs']
        self.opman = OplogThread(
            primary_conn=self.primary_conn,
            main_address='%s:%d' % (mongo_host, self.primary_p),
            oplog_coll=self.oplog_coll,
            is_sharded=False,
            doc_manager=DocManager(),
            oplog_progress_dict=LockingDict(),
            namespace_set=None,
            auth_key=None,
            auth_username=None,
            repl_set='test-oplog-manager'
        )

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        self.primary_conn.close()
        kill_replica_set('test-oplog-manager')

    def test_retrieve_doc(self):
        """ Test the retrieve_doc method """
        # Trivial case where the oplog entry is None
        self.assertEqual(self.opman.retrieve_doc(None), None)

        # Retrieve a document from insert operation in oplog
        doc = {"name": "mango", "type": "fruit",
               "ns": "test.test", "weight": 3.24, "i": 1}
        self.primary_conn["test"]["test"].insert(doc)
        oplog_entries = self.primary_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.DESCENDING)],
            limit=1
        )
        oplog_entry = next(oplog_entries)
        self.assertEqual(self.opman.retrieve_doc(oplog_entry), doc)

        # Retrieve a document from update operation in oplog
        self.primary_conn["test"]["test"].update(
            {"i": 1},
            {"$set": {"sounds-like": "mongo"}}
        )
        oplog_entries = self.primary_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.DESCENDING)],
            limit=1
        )
        doc["sounds-like"] = "mongo"
        self.assertEqual(self.opman.retrieve_doc(next(oplog_entries)), doc)

        # Retrieve a document from remove operation in oplog
        # (expected: None)
        self.primary_conn["test"]["test"].remove({
            "i": 1
        })
        oplog_entries = self.primary_conn["local"]["oplog.rs"].find(
            sort=[("ts", pymongo.DESCENDING)],
            limit=1
        )
        self.assertEqual(self.opman.retrieve_doc(next(oplog_entries)), None)

        # Retrieve a document with bad _id
        # (expected: None)
        oplog_entry["o"]["_id"] = "ThisIsNotAnId123456789"
        self.assertEqual(self.opman.retrieve_doc(oplog_entry), None)

    def test_get_oplog_cursor(self):
        '''Test the get_oplog_cursor method'''
        # Trivial case: timestamp is None
        self.assertEqual(self.opman.get_oplog_cursor(None), None)

        # earliest entry is after given timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.primary_conn["test"]["test"].insert(doc)
        self.assertEqual(self.opman.get_oplog_cursor(
            bson.Timestamp(1, 0)), None)

        # earliest entry is the only one at/after timestamp
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        self.assertEqual(self.opman.retrieve_doc(next(cursor)), doc)

        # many entries before and after timestamp
        self.primary_conn["test"]["test"].insert(
            {"i": i} for i in range(2, 1002))
        oplog_cursor = self.oplog_coll.find(
            sort=[("ts", pymongo.ASCENDING)]
        )
#......... some code omitted .........
Example 9: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        self.repl_set = ReplicaSetSingle().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_coll = self.primary_conn.local['oplog.rs']

    def reset_opman(self, include_ns=None, exclude_ns=None, dest_mapping=None):
        if include_ns is None:
            include_ns = []
        if exclude_ns is None:
            exclude_ns = []
        if dest_mapping is None:
            dest_mapping = {}

        # include_ns must not exist together with exclude_ns;
        # dest_mapping must exist together with include_ns.
        # Those checks are covered in test_config.py, so we skip them here.
        self.dest_mapping_stru = DestMapping(include_ns, exclude_ns,
                                             dest_mapping)
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(DocManager(),),
            oplog_progress_dict=LockingDict(),
            dest_mapping_stru=self.dest_mapping_stru,
            ns_set=include_ns,
            ex_ns_set=exclude_ns
        )

    def init_dbs(self):
        # includedb1.* & includedb2.includecol1 are collections of interest
        self.primary_conn["includedb1"]["includecol1"].insert_many(
            [{"idb1col1": i} for i in range(1, 3)])
        self.primary_conn["includedb1"]["includecol2"].insert_many(
            [{"idb1col2": i} for i in range(1, 3)])
        self.primary_conn["includedb2"]["includecol1"].insert_many(
            [{"idb2col1": i} for i in range(1, 3)])

        # the other collections are not of interest
        self.primary_conn["includedb2"]["excludecol2"].insert_many(
            [{"idb2col2": i} for i in range(1, 3)])
        self.primary_conn["excludedb3"]["excludecol1"].insert_many(
            [{"idb3col1": i} for i in range(1, 3)])

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        for db in self.primary_conn.database_names():
            if db != "local":
                self.primary_conn.drop_database(db)
        close_client(self.primary_conn)
        self.repl_set.stop()

    def test_get_oplog_cursor(self):
        '''Test the get_oplog_cursor method'''
        # Put something in the dbs
        self.init_dbs()

        # timestamp is None - all oplog entries excluding no-ops are returned.
        # A wildcard include set does not change the result.
        self.reset_opman(["includedb1.*", "includedb2.includecol1"], [], {})
        got_cursor = self.opman.get_oplog_cursor(None)
        oplog_cursor = self.oplog_coll.find(
            {'op': {'$ne': 'n'}})
        self.assertNotEqual(got_cursor, None)
        self.assertEqual(got_cursor.count(), oplog_cursor.count())

        # A wildcard exclude set does not change the result either.
        self.reset_opman([], ["includedb2.excludecol2", "excludedb3.*"], {})
        got_cursor = self.opman.get_oplog_cursor(None)
        oplog_cursor = self.oplog_coll.find(
            {'op': {'$ne': 'n'}})
        self.assertNotEqual(got_cursor, None)
        self.assertEqual(got_cursor.count(), oplog_cursor.count())

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "idb1col1": 1}
        self.primary_conn["includedb1"]["includecol1"].insert_one(doc)
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = next(cursor)['o']['_id']
        retrieved = self.primary_conn.includedb1.includecol1.find_one(
            next_entry_id)
        self.assertEqual(retrieved, doc)
#......... some code omitted .........
Example 10: TestOplogManager
# Required import: from mongo_connector.oplog_manager import OplogThread [as alias]
# Or: from mongo_connector.oplog_manager.OplogThread import get_last_oplog_timestamp [as alias]
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        self.repl_set = ReplicaSet().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_coll = self.primary_conn.local['oplog.rs']
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(DocManager(),),
            oplog_progress_dict=LockingDict()
        )

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        self.primary_conn.drop_database("test")
        close_client(self.primary_conn)
        self.repl_set.stop()

    def test_get_oplog_cursor(self):
        '''Test the get_oplog_cursor method'''
        # timestamp is None - all oplog entries are returned.
        cursor = self.opman.get_oplog_cursor(None)
        self.assertEqual(cursor.count(),
                         self.primary_conn["local"]["oplog.rs"].count())

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.primary_conn["test"]["test"].insert_one(doc)
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = next(cursor)['o']['_id']
        retrieved = self.primary_conn.test.test.find_one(next_entry_id)
        self.assertEqual(retrieved, doc)

        # many entries before and after timestamp
        self.primary_conn["test"]["test"].insert_many(
            [{"i": i} for i in range(2, 1002)])
        oplog_cursor = self.oplog_coll.find(
            {'op': {'$ne': 'n'},
             'ns': {'$not': re.compile(r'\.(system|\$cmd)')}},
            sort=[("ts", pymongo.ASCENDING)]
        )
        # initial insert + 1000 more inserts
        self.assertEqual(oplog_cursor.count(), 1 + 1000)
        pivot = oplog_cursor.skip(400).limit(-1)[0]
        goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
        self.assertEqual(goc_cursor.count(), 1 + 1000 - 400)

    def test_get_last_oplog_timestamp(self):
        """Test the get_last_oplog_timestamp method"""
        # "empty" the oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), None)

        # Test non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert_one({
                "i": i + 500
            })
        oplog = self.primary_conn["local"]["oplog.rs"]
        oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
        self.assertEqual(self.opman.get_last_oplog_timestamp(),
                         oplog["ts"])

    def test_dump_collection(self):
        """Test the dump_collection method

        Cases:
        1. empty oplog
        2. non-empty oplog
        """
        # Test with empty oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        last_ts = self.opman.dump_collection()
        self.assertEqual(last_ts, None)

        # Test with non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert_one({
                "i": i + 500
            })
        last_ts = self.opman.get_last_oplog_timestamp()
        self.assertEqual(last_ts, self.opman.dump_collection())
        self.assertEqual(len(self.opman.doc_managers[0]._search()), 1000)
#......... some code omitted .........