本文整理汇总了Python中mongo_connector.oplog_manager.OplogThread.join方法的典型用法代码示例。如果您正苦于以下问题:Python OplogThread.join方法的具体用法?Python OplogThread.join怎么用?Python OplogThread.join使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mongo_connector.oplog_manager.OplogThread的用法示例。
在下文中一共展示了OplogThread.join方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_skipped_oplog_entry_updates_checkpoint
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
def test_skipped_oplog_entry_updates_checkpoint(self):
    """OplogThread must advance its checkpoint for every oplog entry,
    including entries whose namespace is filtered out (skipped)."""
    repl_set = ReplicaSetSingle().start()
    conn = repl_set.client()
    opman = OplogThread(
        primary_client=conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        namespace_config=NamespaceConfig(namespace_set=["test.test"]),
    )
    opman.start()
    # Insert a document into an included collection
    conn["test"]["test"].insert_one({"test": 1})
    last_ts = opman.get_last_oplog_timestamp()
    assert_soon(
        lambda: last_ts == opman.checkpoint,
        "OplogThread never updated checkpoint to non-skipped entry.",
    )
    self.assertEqual(len(opman.doc_managers[0]._search()), 1)
    # Make sure that the oplog thread updates its checkpoint on every
    # oplog entry, even one for an excluded namespace.
    conn["test"]["ignored"].insert_one({"test": 1})
    last_ts = opman.get_last_oplog_timestamp()
    assert_soon(
        lambda: last_ts == opman.checkpoint,
        "OplogThread never updated checkpoint to skipped entry.",
    )
    opman.join()
    conn.close()
    repl_set.stop()
示例2: TestRollbacks
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestRollbacks(unittest.TestCase):
def tearDown(self):
    """Close both client connections and stop the replica set."""
    close_client(self.primary_conn)
    close_client(self.secondary_conn)
    self.repl_set.stop()
def setUp(self):
    """Start a replica set, open connections, and build the OplogThread
    under test with a fresh (empty) oplog progress file."""
    # Create a new oplog progress file
    try:
        os.unlink("oplog.timestamp")
    except OSError:
        pass
    open("oplog.timestamp", "w").close()
    # Start a replica set
    self.repl_set = ReplicaSet().start()
    # Connection to the replica set as a whole
    self.main_conn = self.repl_set.client()
    # Connection to the primary specifically
    self.primary_conn = self.repl_set.primary.client()
    # Connection to the secondary specifically
    self.secondary_conn = self.repl_set.secondary.client(
        read_preference=ReadPreference.SECONDARY_PREFERRED)
    # Wipe any test data
    self.main_conn["test"]["mc"].drop()
    # Oplog thread
    doc_manager = DocManager()
    oplog_progress = LockingDict()
    self.opman = OplogThread(
        primary_client=self.main_conn,
        doc_managers=(doc_manager,),
        oplog_progress_dict=oplog_progress,
        ns_set=["test.mc"],
    )
def test_single_target(self):
    """Test rollback handling with a single replication target.

    Writes one doc with the primary up, one doc that will later be
    rolled back, then kills/restarts nodes and verifies both MongoDB
    and the doc manager converge to only the first document."""
    self.opman.start()
    # Insert first document with primary up
    self.main_conn["test"]["mc"].insert_one({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)
    # Make sure the insert is replicated
    secondary = self.secondary_conn
    assert_soon(lambda: secondary["test"]["mc"].count() == 1,
                "first write didn't replicate to secondary")
    # Kill the primary
    self.repl_set.primary.stop(destroy=False)
    # Wait for the secondary to be promoted
    assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"])
    # Insert another document. This will be rolled back later
    retry_until_ok(self.main_conn["test"]["mc"].insert_one, {"i": 1})
    self.assertEqual(secondary["test"]["mc"].count(), 2)
    # Wait for replication to doc manager
    assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 2,
                "not all writes were replicated to doc manager")
    # Kill the new primary
    self.repl_set.secondary.stop(destroy=False)
    # Start both servers back up
    self.repl_set.primary.start()
    primary_admin = self.primary_conn["admin"]
    assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
                "restarted primary never resumed primary status")
    self.repl_set.secondary.start()
    assert_soon(lambda: retry_until_ok(secondary.admin.command,
                                       'replSetGetStatus')['myState'] == 2,
                "restarted secondary never resumed secondary status")
    assert_soon(lambda:
                retry_until_ok(self.main_conn.test.mc.find().count) > 0,
                "documents not found after primary/secondary restarted")
    # Only first document should exist in MongoDB
    self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)
    # Same case should hold for the doc manager
    doc_manager = self.opman.doc_managers[0]
    assert_soon(lambda: len(doc_manager._search()) == 1,
                'documents never rolled back in doc manager.')
    self.assertEqual(doc_manager._search()[0]["i"], 0)
    # cleanup
    self.opman.join()
def test_many_targets(self):
"""Test with several replication targets"""
# OplogThread has multiple doc managers
doc_managers = [DocManager(), DocManager(), DocManager()]
#.........这里部分代码省略.........
示例3: TestOplogManagerSharded
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
#.........这里部分代码省略.........
os.unlink("config.txt")
except OSError:
pass
open("config.txt", "w").close()
# Oplog threads (oplog manager) for each shard
doc_manager = DocManager()
oplog_progress = LockingDict()
self.opman1 = OplogThread(
primary_conn=self.shard1_conn,
main_address='%s:%d' % (mongo_host, self.mongos_p),
oplog_coll=self.shard1_conn["local"]["oplog.rs"],
is_sharded=True,
doc_manager=doc_manager,
oplog_progress_dict=oplog_progress,
namespace_set=["test.mcsharded", "test.mcunsharded"],
auth_key=None,
auth_username=None
)
self.opman2 = OplogThread(
primary_conn=self.shard2_conn,
main_address='%s:%d' % (mongo_host, self.mongos_p),
oplog_coll=self.shard2_conn["local"]["oplog.rs"],
is_sharded=True,
doc_manager=doc_manager,
oplog_progress_dict=oplog_progress,
namespace_set=["test.mcsharded", "test.mcunsharded"],
auth_key=None,
auth_username=None
)
def tearDown(self):
    """Join both oplog threads (if started), close all connections,
    and kill the mongo processes."""
    try:
        self.opman1.join()
    except RuntimeError:
        pass  # thread may not have been started
    try:
        self.opman2.join()
    except RuntimeError:
        pass  # thread may not have been started
    self.mongos_conn.close()
    self.shard1_conn.close()
    self.shard2_conn.close()
    self.shard1_secondary_conn.close()
    self.shard2_secondary_conn.close()
    kill_all()
def test_retrieve_doc(self):
""" Test the retrieve_doc method """
# Trivial case where the oplog entry is None
self.assertEqual(self.opman1.retrieve_doc(None), None)
# Retrieve a document from insert operation in oplog
doc = {"name": "mango", "type": "fruit",
"ns": "test.mcsharded", "weight": 3.24, "i": 1}
self.mongos_conn["test"]["mcsharded"].insert(doc)
oplog_entries = self.shard1_conn["local"]["oplog.rs"].find(
sort=[("ts", pymongo.DESCENDING)],
limit=1
)
oplog_entry = next(oplog_entries)
self.assertEqual(self.opman1.retrieve_doc(oplog_entry), doc)
# Retrieve a document from update operation in oplog
self.mongos_conn["test"]["mcsharded"].update(
示例4: ShardedClusterTestCase
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
#.........这里部分代码省略.........
self.mongos_conn["test"]["mcsharded"].drop()
# Disable the balancer before creating the collection
self.mongos_conn.config.settings.update_one(
{"_id": "balancer"},
{"$set": {"stopped": True}},
upsert=True
)
# Create and shard the collection test.mcsharded on the "i" field
self.mongos_conn["test"]["mcsharded"].create_index("i")
self.mongos_conn.admin.command("enableSharding", "test")
self.mongos_conn.admin.command("shardCollection",
"test.mcsharded",
key={"i": 1})
# Pre-split the collection so that:
# i < 1000 lives on shard1
# i >= 1000 lives on shard2
self.mongos_conn.admin.command(bson.SON([
("split", "test.mcsharded"),
("middle", {"i": 1000})
]))
# Move chunks to their proper places
try:
self.mongos_conn["admin"].command(
"moveChunk",
"test.mcsharded",
find={"i": 1},
to='demo-set-0'
)
except pymongo.errors.OperationFailure:
pass
try:
self.mongos_conn["admin"].command(
"moveChunk",
"test.mcsharded",
find={"i": 1000},
to='demo-set-1'
)
except pymongo.errors.OperationFailure:
pass
# Make sure chunks are distributed correctly
self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1})
self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1000})
def chunks_moved():
doc1 = self.shard1_conn.test.mcsharded.find_one()
doc2 = self.shard2_conn.test.mcsharded.find_one()
if None in (doc1, doc2):
return False
return doc1['i'] == 1 and doc2['i'] == 1000
assert_soon(chunks_moved, max_tries=120,
message='chunks not moved? doc1=%r, doc2=%r' % (
self.shard1_conn.test.mcsharded.find_one(),
self.shard2_conn.test.mcsharded.find_one()))
self.mongos_conn.test.mcsharded.delete_many({})
# create a new oplog progress file
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
# Oplog threads (oplog manager) for each shard
doc_manager = DocManager()
oplog_progress = LockingDict()
namespace_config = NamespaceConfig(
namespace_set=["test.mcsharded", "test.mcunsharded"])
self.opman1 = OplogThread(
primary_client=self.shard1_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
namespace_config=namespace_config,
mongos_client=self.mongos_conn
)
self.opman2 = OplogThread(
primary_client=self.shard2_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
namespace_config=namespace_config,
mongos_client=self.mongos_conn
)
def tearDown(self):
    """Join both oplog threads (if started), close all clients, and
    stop the sharded cluster."""
    try:
        self.opman1.join()
    except RuntimeError:
        pass  # thread may not have been started
    try:
        self.opman2.join()
    except RuntimeError:
        pass  # thread may not have been started
    close_client(self.mongos_conn)
    close_client(self.shard1_conn)
    close_client(self.shard2_conn)
    self.cluster.stop()
示例5: TestRollbacks
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestRollbacks(unittest.TestCase):
def tearDown(self):
    """Kill every mongo process started for this test."""
    kill_all()
def setUp(self):
    """Start a 'rollbacks' replica set, open connections, and build the
    OplogThread under test with a fresh progress file."""
    # Create a new oplog progress file
    try:
        os.unlink("config.txt")
    except OSError:
        pass
    open("config.txt", "w").close()
    # Start a replica set
    _, self.secondary_p, self.primary_p = start_replica_set('rollbacks')
    # Connection to the replica set as a whole
    self.main_conn = MongoClient('%s:%d' % (mongo_host, self.primary_p),
                                 replicaSet='rollbacks')
    # Connection to the primary specifically
    self.primary_conn = MongoClient('%s:%d' % (mongo_host, self.primary_p))
    # Connection to the secondary specifically
    self.secondary_conn = MongoClient(
        '%s:%d' % (mongo_host, self.secondary_p),
        read_preference=ReadPreference.SECONDARY_PREFERRED
    )
    # Wipe any test data
    self.main_conn["test"]["mc"].drop()
    # Oplog thread
    doc_manager = DocManager()
    oplog_progress = LockingDict()
    self.opman = OplogThread(
        primary_conn=self.main_conn,
        main_address='%s:%d' % (mongo_host, self.primary_p),
        oplog_coll=self.main_conn["local"]["oplog.rs"],
        is_sharded=False,
        doc_manager=doc_manager,
        oplog_progress_dict=oplog_progress,
        namespace_set=["test.mc"],
        auth_key=None,
        auth_username=None,
        repl_set="rollbacks"
    )
def test_single_target(self):
    """Test rollback handling with a single replication target
    (older wait_for/kill_mongo_proc style of the same scenario)."""
    self.opman.start()
    # Insert first document with primary up
    self.main_conn["test"]["mc"].insert({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)
    # Make sure the insert is replicated
    secondary = self.secondary_conn
    self.assertTrue(wait_for(lambda: secondary["test"]["mc"].count() == 1),
                    "first write didn't replicate to secondary")
    # Kill the primary
    kill_mongo_proc(self.primary_p, destroy=False)
    # Wait for the secondary to be promoted
    while not secondary["admin"].command("isMaster")["ismaster"]:
        time.sleep(1)
    # Insert another document. This will be rolled back later
    retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
    self.assertEqual(secondary["test"]["mc"].count(), 2)
    # Wait for replication to doc manager
    c = lambda: len(self.opman.doc_managers[0]._search()) == 2
    self.assertTrue(wait_for(c),
                    "not all writes were replicated to doc manager")
    # Kill the new primary
    kill_mongo_proc(self.secondary_p, destroy=False)
    # Start both servers back up
    restart_mongo_proc(self.primary_p)
    primary_admin = self.primary_conn["admin"]
    while not primary_admin.command("isMaster")["ismaster"]:
        time.sleep(1)
    restart_mongo_proc(self.secondary_p)
    while secondary["admin"].command("replSetGetStatus")["myState"] != 2:
        time.sleep(1)
    while retry_until_ok(self.main_conn["test"]["mc"].find().count) == 0:
        time.sleep(1)
    # Only first document should exist in MongoDB
    self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)
    # Same case should hold for the doc manager
    doc_manager = self.opman.doc_managers[0]
    self.assertEqual(len(doc_manager._search()), 1)
    self.assertEqual(doc_manager._search()[0]["i"], 0)
    # cleanup
    self.opman.join()
#.........这里部分代码省略.........
示例6: TestOplogManager
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestOplogManager(unittest.TestCase):
"""Defines all the testing methods, as well as a method that sets up the
cluster
"""
def setUp(self):
    """Start a replica set and build the OplogThread under test."""
    _, _, self.primary_p = start_replica_set("test-oplog-manager")
    self.primary_conn = pymongo.MongoClient(mongo_host, self.primary_p)
    self.oplog_coll = self.primary_conn.local["oplog.rs"]
    self.opman = OplogThread(
        primary_conn=self.primary_conn,
        main_address="%s:%d" % (mongo_host, self.primary_p),
        oplog_coll=self.oplog_coll,
        is_sharded=False,
        doc_manager=DocManager(),
        oplog_progress_dict=LockingDict(),
        namespace_set=None,
        auth_key=None,
        auth_username=None,
        repl_set="test-oplog-manager",
    )
def tearDown(self):
    """Join the oplog thread (if started) and kill the replica set."""
    try:
        self.opman.join()
    except RuntimeError:
        pass  # OplogThread may not have been started
    self.primary_conn.close()
    kill_replica_set("test-oplog-manager")
def test_get_oplog_cursor(self):
    """Test the get_oplog_cursor method."""
    # timestamp is None - all oplog entries are returned.
    cursor = self.opman.get_oplog_cursor(None)
    self.assertEqual(cursor.count(),
                     self.primary_conn["local"]["oplog.rs"].count())
    # earliest entry is the only one at/after timestamp
    doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
    self.primary_conn["test"]["test"].insert(doc)
    latest_timestamp = self.opman.get_last_oplog_timestamp()
    cursor = self.opman.get_oplog_cursor(latest_timestamp)
    self.assertNotEqual(cursor, None)
    self.assertEqual(cursor.count(), 1)
    next_entry_id = next(cursor)["o"]["_id"]
    retrieved = self.primary_conn.test.test.find_one(next_entry_id)
    self.assertEqual(retrieved, doc)
    # many entries before and after timestamp
    self.primary_conn["test"]["test"].insert({"i": i} for i in range(2, 1002))
    oplog_cursor = self.oplog_coll.find(sort=[("ts", pymongo.ASCENDING)])
    # startup + insert + 1000 inserts
    self.assertEqual(oplog_cursor.count(), 2 + 1000)
    pivot = oplog_cursor.skip(400).limit(1)[0]
    goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
    self.assertEqual(goc_cursor.count(), 2 + 1000 - 400)
def test_get_last_oplog_timestamp(self):
    """Test the get_last_oplog_timestamp method."""
    # "empty" the oplog by pointing it at an empty collection
    self.opman.oplog = self.primary_conn["test"]["emptycollection"]
    self.assertEqual(self.opman.get_last_oplog_timestamp(), None)
    # Test non-empty oplog
    self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
    for i in range(1000):
        self.primary_conn["test"]["test"].insert({"i": i + 500})
    oplog = self.primary_conn["local"]["oplog.rs"]
    oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(1)[0]
    self.assertEqual(self.opman.get_last_oplog_timestamp(), oplog["ts"])
def test_dump_collection(self):
    """Test the dump_collection method.

    Cases:
    1. empty oplog
    2. non-empty oplog
    """
    # Test with empty oplog
    self.opman.oplog = self.primary_conn["test"]["emptycollection"]
    last_ts = self.opman.dump_collection()
    self.assertEqual(last_ts, None)
    # Test with non-empty oplog
    self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
    for i in range(1000):
        self.primary_conn["test"]["test"].insert({"i": i + 500})
    last_ts = self.opman.get_last_oplog_timestamp()
    self.assertEqual(last_ts, self.opman.dump_collection())
    self.assertEqual(len(self.opman.doc_managers[0]._search()), 1000)
def test_dump_collection_with_error(self):
"""Test the dump_collection method with invalid documents.
Cases:
#.........这里部分代码省略.........
示例7: TestOplogManagerSharded
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
#.........这里部分代码省略.........
assert_soon(chunks_moved, max_tries=120,
message='chunks not moved? doc1=%r, doc2=%r' % (
self.shard1_conn.test.mcsharded.find_one(),
self.shard2_conn.test.mcsharded.find_one()))
self.mongos_conn.test.mcsharded.delete_many({})
# create a new oplog progress file
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
# Oplog threads (oplog manager) for each shard
doc_manager = DocManager()
oplog_progress = LockingDict()
self.opman1 = OplogThread(
primary_client=self.shard1_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
ns_set=["test.mcsharded", "test.mcunsharded"],
mongos_client=self.mongos_conn
)
self.opman2 = OplogThread(
primary_client=self.shard2_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
ns_set=["test.mcsharded", "test.mcunsharded"],
mongos_client=self.mongos_conn
)
def tearDown(self):
    """Join both oplog threads (if started), close all clients, and
    stop the sharded cluster."""
    try:
        self.opman1.join()
    except RuntimeError:
        pass  # thread may not have been started
    try:
        self.opman2.join()
    except RuntimeError:
        pass  # thread may not have been started
    close_client(self.mongos_conn)
    close_client(self.shard1_conn)
    close_client(self.shard2_conn)
    close_client(self.shard1_secondary_conn)
    close_client(self.shard2_secondary_conn)
    self.cluster.stop()
def test_get_oplog_cursor(self):
"""Test the get_oplog_cursor method"""
# timestamp = None
cursor1 = self.opman1.get_oplog_cursor(None)
oplog1 = self.shard1_conn["local"]["oplog.rs"].find(
{'op': {'$ne': 'n'},
'ns': {'$not': re.compile(r'\.system')}})
self.assertEqual(list(cursor1), list(oplog1))
cursor2 = self.opman2.get_oplog_cursor(None)
oplog2 = self.shard2_conn["local"]["oplog.rs"].find(
{'op': {'$ne': 'n'},
'ns': {'$not': re.compile(r'\.system')}})
self.assertEqual(list(cursor2), list(oplog2))
# earliest entry is the only one at/after timestamp
doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
self.mongos_conn["test"]["mcsharded"].insert_one(doc)
示例8: TestOplogManager
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestOplogManager(unittest.TestCase):
"""Defines all the testing methods, as well as a method that sets up the
cluster
"""
def setUp(self):
    """Start a single-node replica set and build the OplogThread under
    test, replicating test.* and treating gridfs.* as GridFS."""
    self.repl_set = ReplicaSetSingle().start()
    self.primary_conn = self.repl_set.client()
    self.oplog_coll = self.primary_conn.local["oplog.rs"]
    self.opman = OplogThread(
        primary_client=self.primary_conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        namespace_config=NamespaceConfig(
            namespace_options={"test.*": True, "gridfs.*": {"gridfs": True}}
        ),
    )
def tearDown(self):
    """Join the oplog thread (if started), drop test data, and stop
    the replica set."""
    try:
        self.opman.join()
    except RuntimeError:
        pass  # OplogThread may not have been started
    self.primary_conn.drop_database("test")
    close_client(self.primary_conn)
    self.repl_set.stop()
def test_get_oplog_cursor(self):
    """Test the get_oplog_cursor method."""
    # timestamp is None - all oplog entries excluding no-ops are returned.
    cursor = self.opman.get_oplog_cursor(None)
    self.assertEqual(
        cursor.count(),
        self.primary_conn["local"]["oplog.rs"].find({"op": {"$ne": "n"}}).count(),
    )
    # earliest entry is the only one at/after timestamp
    doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
    self.primary_conn["test"]["test"].insert_one(doc)
    latest_timestamp = self.opman.get_last_oplog_timestamp()
    cursor = self.opman.get_oplog_cursor(latest_timestamp)
    self.assertNotEqual(cursor, None)
    self.assertEqual(cursor.count(), 1)
    next_entry_id = next(cursor)["o"]["_id"]
    retrieved = self.primary_conn.test.test.find_one(next_entry_id)
    self.assertEqual(retrieved, doc)
    # many entries before and after timestamp
    self.primary_conn["test"]["test"].insert_many(
        [{"i": i} for i in range(2, 1002)]
    )
    oplog_cursor = self.oplog_coll.find(
        {"op": {"$ne": "n"}, "ns": {"$not": re.compile(r"\.(system|\$cmd)")}},
        sort=[("ts", pymongo.ASCENDING)],
    )
    # initial insert + 1000 more inserts
    self.assertEqual(oplog_cursor.count(), 1 + 1000)
    pivot = oplog_cursor.skip(400).limit(-1)[0]
    goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
    self.assertEqual(goc_cursor.count(), 1 + 1000 - 400)
def test_get_last_oplog_timestamp(self):
    """Test the get_last_oplog_timestamp method."""
    # "empty" the oplog by pointing it at an empty collection
    self.opman.oplog = self.primary_conn["test"]["emptycollection"]
    self.assertEqual(self.opman.get_last_oplog_timestamp(), None)
    # Test non-empty oplog
    self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
    for i in range(1000):
        self.primary_conn["test"]["test"].insert_one({"i": i + 500})
    oplog = self.primary_conn["local"]["oplog.rs"]
    oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
    self.assertEqual(self.opman.get_last_oplog_timestamp(), oplog["ts"])
def test_dump_collection(self):
"""Test the dump_collection method
Cases:
1. empty oplog
2. non-empty oplog, with gridfs collections
3. non-empty oplog, specified a namespace-set, none of the oplog
entries are for collections in the namespace-set
"""
# Test with empty oplog
self.opman.oplog = self.primary_conn["test"]["emptycollection"]
last_ts = self.opman.dump_collection()
self.assertEqual(last_ts, None)
# Test with non-empty oplog with gridfs collections
self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
# Insert 10 gridfs files
for i in range(10):
fs = gridfs.GridFS(self.primary_conn["gridfs"], collection="test" + str(i))
#.........这里部分代码省略.........
示例9: TestFilterFields
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestFilterFields(unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Start one single-node replica set shared by every test."""
    cls.repl_set = ReplicaSetSingle().start()
    cls.primary_conn = cls.repl_set.client()
    cls.oplog_coll = cls.primary_conn.local['oplog.rs']
@classmethod
def tearDownClass(cls):
    """Drop test data, close the client, and stop the replica set."""
    cls.primary_conn.drop_database("test")
    close_client(cls.primary_conn)
    cls.repl_set.stop()
def setUp(self):
    """Build a fresh OplogThread with an empty destination mapping."""
    self.dest_mapping_stru = DestMapping([], [], {})
    self.opman = OplogThread(
        primary_client=self.primary_conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        dest_mapping_stru=self.dest_mapping_stru
    )
def tearDown(self):
    """Join the oplog thread if it was started."""
    try:
        self.opman.join()
    except RuntimeError:
        # OplogThread may not have been started
        pass
def _check_fields(self, opman, fields, exclude_fields, projection):
    """Assert that opman's include/exclude field attributes and its
    derived query projection match the expected values."""
    if fields:
        self.assertEqual(sorted(opman.fields), sorted(fields))
        self.assertEqual(opman._fields, set(fields))
    else:
        self.assertEqual(opman.fields, None)
        self.assertEqual(opman._fields, set([]))
    if exclude_fields:
        self.assertEqual(sorted(opman.exclude_fields),
                         sorted(exclude_fields))
        self.assertEqual(opman._exclude_fields, set(exclude_fields))
    else:
        self.assertEqual(opman.exclude_fields, None)
        self.assertEqual(opman._exclude_fields, set([]))
    self.assertEqual(opman._projection, projection)
def test_filter_fields(self):
    """Only the configured include-fields should reach the doc manager."""
    docman = self.opman.doc_managers[0]
    conn = self.opman.primary_client
    include_fields = ["a", "b", "c"]
    exclude_fields = ["d", "e", "f"]
    # Set fields to care about
    self.opman.fields = include_fields
    # Documents have more than just these fields
    doc = {
        "a": 1, "b": 2, "c": 3,
        "d": 4, "e": 5, "f": 6,
        "_id": 1
    }
    db = conn['test']['test']
    db.insert_one(doc)
    assert_soon(lambda: db.count() == 1)
    self.opman.dump_collection()
    result = docman._search()[0]
    keys = result.keys()
    for inc, exc in zip(include_fields, exclude_fields):
        self.assertIn(inc, keys)
        self.assertNotIn(exc, keys)
def test_filter_exclude_oplog_entry(self):
# Test oplog entries: these are callables, since
# filter_oplog_entry modifies the oplog entry in-place
insert_op = lambda: {
"op": "i",
"o": {
"_id": 0,
"a": 1,
"b": 2,
"c": 3
}
}
update_op = lambda: {
"op": "u",
"o": {
"$set": {
"a": 4,
"b": 5
},
"$unset": {
"c": True
}
},
"o2": {
"_id": 1
}
}
#.........这里部分代码省略.........
示例10: TestOplogManager
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestOplogManager(unittest.TestCase):
"""Defines all the testing methods, as well as a method that sets up the
cluster
"""
def setUp(self):
    """Start a replica set and build the OplogThread under test."""
    _, _, self.primary_p = start_replica_set('test-oplog-manager')
    self.primary_conn = pymongo.MongoClient(mongo_host, self.primary_p)
    self.oplog_coll = self.primary_conn.local['oplog.rs']
    self.opman = OplogThread(
        primary_conn=self.primary_conn,
        main_address='%s:%d' % (mongo_host, self.primary_p),
        oplog_coll=self.oplog_coll,
        is_sharded=False,
        doc_manager=DocManager(),
        oplog_progress_dict=LockingDict(),
        namespace_set=None,
        auth_key=None,
        auth_username=None,
        repl_set='test-oplog-manager'
    )
def tearDown(self):
    """Join the oplog thread (if started) and kill the replica set."""
    try:
        self.opman.join()
    except RuntimeError:
        pass  # OplogThread may not have been started
    self.primary_conn.close()
    kill_replica_set('test-oplog-manager')
def test_retrieve_doc(self):
    """Test the retrieve_doc method for insert, update, remove, and
    bad-_id oplog entries."""
    # Trivial case where the oplog entry is None
    self.assertEqual(self.opman.retrieve_doc(None), None)
    # Retrieve a document from insert operation in oplog
    doc = {"name": "mango", "type": "fruit",
           "ns": "test.test", "weight": 3.24, "i": 1}
    self.primary_conn["test"]["test"].insert(doc)
    oplog_entries = self.primary_conn["local"]["oplog.rs"].find(
        sort=[("ts", pymongo.DESCENDING)],
        limit=1
    )
    oplog_entry = next(oplog_entries)
    self.assertEqual(self.opman.retrieve_doc(oplog_entry), doc)
    # Retrieve a document from update operation in oplog
    self.primary_conn["test"]["test"].update(
        {"i": 1},
        {"$set": {"sounds-like": "mongo"}}
    )
    oplog_entries = self.primary_conn["local"]["oplog.rs"].find(
        sort=[("ts", pymongo.DESCENDING)],
        limit=1
    )
    doc["sounds-like"] = "mongo"
    self.assertEqual(self.opman.retrieve_doc(next(oplog_entries)), doc)
    # Retrieve a document from remove operation in oplog
    # (expected: None)
    self.primary_conn["test"]["test"].remove({
        "i": 1
    })
    oplog_entries = self.primary_conn["local"]["oplog.rs"].find(
        sort=[("ts", pymongo.DESCENDING)],
        limit=1
    )
    self.assertEqual(self.opman.retrieve_doc(next(oplog_entries)), None)
    # Retrieve a document with bad _id
    # (expected: None)
    oplog_entry["o"]["_id"] = "ThisIsNotAnId123456789"
    self.assertEqual(self.opman.retrieve_doc(oplog_entry), None)
def test_get_oplog_cursor(self):
'''Test the get_oplog_cursor method'''
# Trivial case: timestamp is None
self.assertEqual(self.opman.get_oplog_cursor(None), None)
# earliest entry is after given timestamp
doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
self.primary_conn["test"]["test"].insert(doc)
self.assertEqual(self.opman.get_oplog_cursor(
bson.Timestamp(1, 0)), None)
# earliest entry is the only one at/after timestamp
latest_timestamp = self.opman.get_last_oplog_timestamp()
cursor = self.opman.get_oplog_cursor(latest_timestamp)
self.assertNotEqual(cursor, None)
self.assertEqual(cursor.count(), 1)
self.assertEqual(self.opman.retrieve_doc(next(cursor)), doc)
# many entries before and after timestamp
self.primary_conn["test"]["test"].insert(
{"i": i} for i in range(2, 1002))
oplog_cursor = self.oplog_coll.find(
sort=[("ts", pymongo.ASCENDING)]
)
#.........这里部分代码省略.........
示例11: TestOplogManager
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestOplogManager(unittest.TestCase):
"""Defines all the testing methods, as well as a method that sets up the
cluster
"""
def setUp(self):
    """Start a single-node replica set and open a primary connection."""
    self.repl_set = ReplicaSetSingle().start()
    self.primary_conn = self.repl_set.client()
    self.oplog_coll = self.primary_conn.local['oplog.rs']
def reset_opman(self, include_ns=None, exclude_ns=None, dest_mapping=None):
    """(Re)build self.opman with the given namespace filters/mapping.

    Defaults are None (not mutable literals) and normalized here."""
    if include_ns is None:
        include_ns = []
    if exclude_ns is None:
        exclude_ns = []
    if dest_mapping is None:
        dest_mapping = {}
    # include_ns must not exist together with exclude_ns
    # dest_mapping must exist together with include_ns
    # those checks have been tested in test_config.py so we skip that here.
    self.dest_mapping_stru = DestMapping(include_ns, exclude_ns,
                                         dest_mapping)
    self.opman = OplogThread(
        primary_client=self.primary_conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        dest_mapping_stru=self.dest_mapping_stru,
        ns_set=include_ns,
        ex_ns_set=exclude_ns
    )
def init_dbs(self):
    """Seed both included and excluded collections with test documents."""
    # includedb1.* & includedb2.includecol1 are interested collections
    self.primary_conn["includedb1"]["includecol1"].insert_many(
        [{"idb1col1": i} for i in range(1, 3)])
    self.primary_conn["includedb1"]["includecol2"].insert_many(
        [{"idb1col2": i} for i in range(1, 3)])
    self.primary_conn["includedb2"]["includecol1"].insert_many(
        [{"idb2col1": i} for i in range(1, 3)])
    # the others are not interested collections
    self.primary_conn["includedb2"]["excludecol2"].insert_many(
        [{"idb2col2": i} for i in range(1, 3)])
    self.primary_conn["excludedb3"]["excludecol1"].insert_many(
        [{"idb3col1": i} for i in range(1, 3)])
def tearDown(self):
    """Join the oplog thread (if started), drop every non-local
    database, and stop the replica set."""
    try:
        self.opman.join()
    except RuntimeError:
        pass  # OplogThread may not have been started
    for db in self.primary_conn.database_names():
        if db != "local":
            self.primary_conn.drop_database(db)
    close_client(self.primary_conn)
    self.repl_set.stop()
def test_get_oplog_cursor(self):
'''Test the get_oplog_cursor method'''
# Put something in the dbs
self.init_dbs()
# timestamp is None - all oplog entries excluding no-ops are returned.
# wildcard include case no impact the result
self.reset_opman(["includedb1.*", "includedb2.includecol1"], [], {})
got_cursor = self.opman.get_oplog_cursor(None)
oplog_cursor = self.oplog_coll.find(
{'op': {'$ne': 'n'}})
self.assertNotEqual(got_cursor, None)
self.assertEqual(got_cursor.count(), oplog_cursor.count())
# wildcard exclude case no impact the result
self.reset_opman([], ["includedb2.excludecol2", "excludedb3.*"], {})
got_cursor = self.opman.get_oplog_cursor(None)
oplog_cursor = self.oplog_coll.find(
{'op': {'$ne': 'n'}})
self.assertNotEqual(got_cursor, None)
self.assertEqual(got_cursor.count(), oplog_cursor.count())
# earliest entry is the only one at/after timestamp
doc = {"ts": bson.Timestamp(1000, 0), "idb1col1": 1}
self.primary_conn["includedb1"]["includecol1"].insert_one(doc)
latest_timestamp = self.opman.get_last_oplog_timestamp()
cursor = self.opman.get_oplog_cursor(latest_timestamp)
self.assertNotEqual(cursor, None)
self.assertEqual(cursor.count(), 1)
next_entry_id = next(cursor)['o']['_id']
retrieved = self.primary_conn.includedb1.includecol1.find_one(
next_entry_id)
self.assertEqual(retrieved, doc)
#.........这里部分代码省略.........
示例12: TestOplogManager
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestOplogManager(unittest.TestCase):
"""Defines all the testing methods, as well as a method that sets up the
cluster
"""
def setUp(self):
    """Start a replica set and build the OplogThread under test."""
    self.repl_set = ReplicaSet().start()
    self.primary_conn = self.repl_set.client()
    self.oplog_coll = self.primary_conn.local['oplog.rs']
    self.opman = OplogThread(
        primary_client=self.primary_conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict()
    )
def tearDown(self):
    """Join the oplog thread (if started), drop test data, and stop
    the replica set."""
    try:
        self.opman.join()
    except RuntimeError:
        pass  # OplogThread may not have been started
    self.primary_conn.drop_database("test")
    close_client(self.primary_conn)
    self.repl_set.stop()
def test_get_oplog_cursor(self):
    """Exercise get_oplog_cursor with a None timestamp and real ones."""
    # A None timestamp must yield a cursor over the entire oplog.
    full_cursor = self.opman.get_oplog_cursor(None)
    self.assertEqual(full_cursor.count(), self.oplog_coll.count())
    # After one insert, the latest timestamp selects exactly that entry.
    sample = {"ts": bson.Timestamp(1000, 0), "i": 1}
    self.primary_conn["test"]["test"].insert_one(sample)
    newest_ts = self.opman.get_last_oplog_timestamp()
    single_cursor = self.opman.get_oplog_cursor(newest_ts)
    self.assertNotEqual(single_cursor, None)
    self.assertEqual(single_cursor.count(), 1)
    fetched_id = next(single_cursor)['o']['_id']
    stored = self.primary_conn.test.test.find_one(fetched_id)
    self.assertEqual(stored, sample)
    # Load many more documents, pick an entry partway through the
    # oplog, and confirm the cursor counts everything from there on.
    self.primary_conn["test"]["test"].insert_many(
        [{"i": n} for n in range(2, 1002)])
    raw_entries = self.oplog_coll.find(
        {'op': {'$ne': 'n'},
         'ns': {'$not': re.compile(r'\.(system|\$cmd)')}},
        sort=[("ts", pymongo.ASCENDING)]
    )
    # initial insert + 1000 more inserts
    self.assertEqual(raw_entries.count(), 1 + 1000)
    pivot_entry = raw_entries.skip(400).limit(-1)[0]
    tail_cursor = self.opman.get_oplog_cursor(pivot_entry["ts"])
    self.assertEqual(tail_cursor.count(), 1 + 1000 - 400)
def test_get_last_oplog_timestamp(self):
    """Verify get_last_oplog_timestamp on empty and populated oplogs."""
    # Point the thread at an empty collection to simulate a bare oplog.
    self.opman.oplog = self.primary_conn["test"]["emptycollection"]
    self.assertEqual(self.opman.get_last_oplog_timestamp(), None)
    # Restore the real oplog and generate a batch of insert entries.
    self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
    for n in range(500, 1500):
        self.primary_conn["test"]["test"].insert_one({"i": n})
    # The reported timestamp must match the newest raw oplog entry.
    newest = (self.primary_conn["local"]["oplog.rs"]
              .find()
              .sort("$natural", pymongo.DESCENDING)
              .limit(-1)[0])
    self.assertEqual(self.opman.get_last_oplog_timestamp(), newest["ts"])
def test_dump_collection(self):
    """Check dump_collection behavior.

    Cases:
    1. empty oplog
    2. non-empty oplog
    """
    # Empty oplog: nothing to dump, so no timestamp comes back.
    self.opman.oplog = self.primary_conn["test"]["emptycollection"]
    self.assertEqual(self.opman.dump_collection(), None)
    # Populated oplog: dump_collection must return the newest timestamp
    # and replicate every inserted document into the doc manager.
    self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
    for n in range(500, 1500):
        self.primary_conn["test"]["test"].insert_one({"i": n})
    newest_ts = self.opman.get_last_oplog_timestamp()
    self.assertEqual(newest_ts, self.opman.dump_collection())
    self.assertEqual(len(self.opman.doc_managers[0]._search()), 1000)
#.........这里部分代码省略.........
示例13: TestFilterFields
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestFilterFields(unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Start a single-node replica set shared by every test in the class."""
    cls.repl_set = ReplicaSetSingle().start()
    cls.primary_conn = cls.repl_set.client()
    # Raw oplog handle for direct inspection in tests.
    cls.oplog_coll = cls.primary_conn.local["oplog.rs"]
@classmethod
def tearDownClass(cls):
    """Drop test data and shut down the shared replica set."""
    cls.primary_conn.drop_database("test")
    close_client(cls.primary_conn)
    cls.repl_set.stop()
def setUp(self):
    """Create a fresh OplogThread with a default (empty) namespace config."""
    self.namespace_config = NamespaceConfig()
    self.opman = OplogThread(
        primary_client=self.primary_conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        namespace_config=self.namespace_config,
    )
def tearDown(self):
    """Stop the OplogThread created in setUp, tolerating an unstarted thread."""
    try:
        self.opman.join()
    except RuntimeError:
        # OplogThread may not have been started
        pass
def reset_include_fields(self, fields):
    """Rebuild the thread's namespace config so only `fields` are replicated."""
    self.opman.namespace_config = NamespaceConfig(include_fields=fields)
def reset_exclude_fields(self, fields):
    """Rebuild the thread's namespace config so `fields` are stripped out."""
    self.opman.namespace_config = NamespaceConfig(exclude_fields=fields)
def test_filter_fields(self):
    """Included fields survive a dump; every other field is stripped."""
    doc_manager = self.opman.doc_managers[0]
    client = self.opman.primary_client
    include_fields = ["a", "b", "c"]
    exclude_fields = ["d", "e", "f"]
    # Restrict replication to the included fields only.
    self.reset_include_fields(include_fields)
    # The source document carries both included and excluded fields.
    sample = {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "_id": 1}
    collection = client["test"]["test"]
    collection.insert_one(sample)
    assert_soon(lambda: collection.count() == 1)
    self.opman.dump_collection()
    replicated = doc_manager._search()[0]
    replicated_keys = replicated.keys()
    for field in include_fields:
        self.assertIn(field, replicated_keys)
    for field in exclude_fields:
        self.assertNotIn(field, replicated_keys)
def test_filter_exclude_oplog_entry(self):
# Test oplog entries: these are callables, since
# filter_oplog_entry modifies the oplog entry in-place
def insert_op():
return {"op": "i", "o": {"_id": 0, "a": 1, "b": 2, "c": 3}}
def update_op():
return {
"op": "u",
"o": {"$set": {"a": 4, "b": 5}, "$unset": {"c": True}},
"o2": {"_id": 1},
}
def filter_doc(document, fields):
if fields and "_id" in fields:
fields.remove("_id")
return self.opman.filter_oplog_entry(document, exclude_fields=fields)
# Case 0: insert op, no fields provided
filtered = filter_doc(insert_op(), None)
self.assertEqual(filtered, insert_op())
# Case 1: insert op, fields provided
filtered = filter_doc(insert_op(), ["c"])
self.assertEqual(filtered["o"], {"_id": 0, "a": 1, "b": 2})
# Case 2: insert op, fields provided, doc becomes empty except for _id
filtered = filter_doc(insert_op(), ["a", "b", "c"])
self.assertEqual(filtered["o"], {"_id": 0})
# Case 3: update op, no fields provided
filtered = filter_doc(update_op(), None)
self.assertEqual(filtered, update_op())
# Case 4: update op, fields provided
filtered = filter_doc(update_op(), ["b"])
self.assertNotIn("b", filtered["o"]["$set"])
self.assertIn("a", filtered["o"]["$set"])
self.assertEqual(filtered["o"]["$unset"], update_op()["o"]["$unset"])
# Case 5: update op, fields provided, empty $set
filtered = filter_doc(update_op(), ["a", "b"])
self.assertNotIn("$set", filtered["o"])
#.........这里部分代码省略.........
示例14: TestFilterFields
# 需要导入模块: from mongo_connector.oplog_manager import OplogThread [as 别名]
# 或者: from mongo_connector.oplog_manager.OplogThread import join [as 别名]
class TestFilterFields(unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Start a single-node replica set shared by every test in the class."""
    cls.repl_set = ReplicaSetSingle().start()
    cls.primary_conn = cls.repl_set.client()
    # Raw oplog handle for direct inspection in tests.
    cls.oplog_coll = cls.primary_conn.local['oplog.rs']
@classmethod
def tearDownClass(cls):
    """Drop test data and shut down the shared replica set."""
    cls.primary_conn.drop_database("test")
    close_client(cls.primary_conn)
    cls.repl_set.stop()
def setUp(self):
    """Create a fresh OplogThread with a default (empty) namespace config."""
    self.namespace_config = NamespaceConfig()
    self.opman = OplogThread(
        primary_client=self.primary_conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        namespace_config=self.namespace_config
    )
def tearDown(self):
    """Stop the OplogThread created in setUp, tolerating an unstarted thread."""
    try:
        self.opman.join()
    except RuntimeError:
        # OplogThread may not have been started
        pass
def reset_include_fields(self, fields):
    """Rebuild the thread's namespace config so only `fields` are replicated."""
    self.opman.namespace_config = NamespaceConfig(include_fields=fields)
def reset_exclude_fields(self, fields):
    """Rebuild the thread's namespace config so `fields` are stripped out."""
    self.opman.namespace_config = NamespaceConfig(exclude_fields=fields)
def test_filter_fields(self):
    """After a dump, only the configured include-fields remain on docs."""
    target_docman = self.opman.doc_managers[0]
    mongo = self.opman.primary_client
    include_fields = ["a", "b", "c"]
    exclude_fields = ["d", "e", "f"]
    # Configure the thread to replicate only the included fields.
    self.reset_include_fields(include_fields)
    # Insert a document containing extra fields beyond the included set.
    source_doc = {
        "a": 1, "b": 2, "c": 3,
        "d": 4, "e": 5, "f": 6,
        "_id": 1,
    }
    coll = mongo['test']['test']
    coll.insert_one(source_doc)
    assert_soon(lambda: coll.count() == 1)
    self.opman.dump_collection()
    dumped = target_docman._search()[0]
    dumped_keys = dumped.keys()
    for wanted in include_fields:
        self.assertIn(wanted, dumped_keys)
    for unwanted in exclude_fields:
        self.assertNotIn(unwanted, dumped_keys)
def test_filter_exclude_oplog_entry(self):
# Test oplog entries: these are callables, since
# filter_oplog_entry modifies the oplog entry in-place
insert_op = lambda: {
"op": "i",
"o": {
"_id": 0,
"a": 1,
"b": 2,
"c": 3
}
}
update_op = lambda: {
"op": "u",
"o": {
"$set": {
"a": 4,
"b": 5
},
"$unset": {
"c": True
}
},
"o2": {
"_id": 1
}
}
def filter_doc(document, fields):
if fields and '_id' in fields:
fields.remove('_id')
return self.opman.filter_oplog_entry(
document, exclude_fields=fields)
# Case 0: insert op, no fields provided
filtered = filter_doc(insert_op(), None)
self.assertEqual(filtered, insert_op())
#.........这里部分代码省略.........