This page collects typical usage examples of the shardmonster.api.set_shard_at_rest function in Python: what set_shard_at_rest is for, how to call it, and how it is used in real code.
15 code examples of the set_shard_at_rest function are shown below, ordered by popularity by default.
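Before the numbered examples, here is a minimal sketch of the pattern most of them share: declare a realm, pin a shard key to a location with set_shard_at_rest, and resolve that location with where_is. This is a sketch, not the library's documented quick-start: the import path for ensure_realm_exists and where_is is assumed from the examples' unqualified calls, the realm, field, and 'dest1/some_db' location are illustrative, and a cluster named 'dest1' must already be configured in shardmonster for the calls to succeed.

# Minimal usage sketch; names and the 'dest1/some_db' location are illustrative,
# and the import path for the helpers is assumed from the examples below.
from shardmonster.api import ensure_realm_exists, set_shard_at_rest, where_is

# Declare a realm: documents in 'some_collection' are sharded on 'some_field'.
ensure_realm_exists('some_realm', 'some_field', 'some_collection')

# Pin shard key 1 to a cluster/database location that is already configured.
set_shard_at_rest('some_realm', 1, 'dest1/some_db')

# Queries for that shard key now resolve to the location set above.
assert where_is('some_collection', 1) == 'dest1/some_db'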
Example 1: test_update
def test_update(self):
    # Put the same document in multiple locations (a mid-migration status)
    # then do an update and ensure that only the correct place has been
    # updated.
    api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
    doc1 = {'x': 1, 'y': 1}
    self.db1.dummy.insert(doc1)
    api.start_migration('dummy', 1, 'dest2/test_sharding')
    api.set_shard_to_migration_status(
        'dummy', 1, api.ShardStatus.MIGRATING_COPY)
    self.db2.dummy.insert(doc1)

    result = operations.multishard_update('dummy', {}, {'$inc': {'y': 1}})
    self.assertEquals(1, result['n'])

    # Query the correct shard first and see that the counter has been
    # incremented
    result, = operations.multishard_find('dummy', {'x': 1})
    self.assertEquals(2, result['y'])

    # Now spoof the metadata such that the system thinks the data is on
    # shard2. The counter should still be 1 here.
    api.set_shard_at_rest('dummy', 1, "dest2/test_sharding", force=True)
    result, = operations.multishard_find('dummy', {'x': 1})
    self.assertEquals(1, result['y'])
Example 2: test_alive
def test_alive(self):
    api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
    doc1 = {'x': 1, 'y': 1}
    self.db1.dummy.insert(doc1)

    c = operations.multishard_find('dummy', {})
    self.assertTrue(c.alive)
Example 3: test_sync_uses_correct_connection
def test_sync_uses_correct_connection(self):
    """This tests for a bug found during a rollout. The connection for the
    metadata was assumed to be the same connection as the source data was
    going to be coming from. This is *not* always the case.
    """
    # To test this a migration from new to old will expose the bug
    api.set_shard_at_rest('dummy', 1, "dest2/test_sharding")
    api.start_migration('dummy', 1, "dest1/test_sharding")

    # Mimic the state the shard would be in after a document was copied
    # from one location to another
    doc1 = {'x': 1, 'y': 1}
    doc1['_id'] = self.db1.dummy.insert(doc1)
    self.db2.dummy.insert(doc1)

    # Get the initial oplog position, do an update and then sync from the
    # initial position
    initial_oplog_pos = sharder._get_oplog_pos('dummy', 1)
    self.db2.dummy.update({'x': 1}, {'$inc': {'y': 1}})
    api.set_shard_to_migration_status(
        'dummy', 1, api.ShardStatus.MIGRATING_SYNC)
    sharder._sync_from_oplog('dummy', 1, initial_oplog_pos)

    # The data on the first database should now reflect the update that
    # went through
    doc2, = self.db1.dummy.find({})
    self.assertEquals(2, doc2['y'])
Example 4: test_multishard_count_with_motion
def test_multishard_count_with_motion(self):
    api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
    api.set_shard_at_rest('dummy', 2, "dest1/test_sharding")
    doc1 = {'x': 1, 'y': 1}
    doc2 = {'x': 1, 'y': 2}
    doc3 = {'x': 2, 'y': 1}
    doc4 = {'x': 2, 'y': 2}
    self.db1.dummy.insert(doc1)
    self.db1.dummy.insert(doc2)
    self.db1.dummy.insert(doc3)
    self.db1.dummy.insert(doc4)

    results = operations.multishard_find('dummy', {}).count()
    self.assertEquals(4, results)

    # Mimic the shard now being in the second location and there being
    # documents left here
    api.start_migration('dummy', 2, "dest2/test_sharding")
    api.set_shard_to_migration_status(
        'dummy', 2, api.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
    self.db2.dummy.insert(doc3)
    self.db2.dummy.insert(doc4)

    results = operations.multishard_find('dummy', {}).count()
    self.assertEquals(4, results)
Example 5: _attempt_migration
def _attempt_migration(self, num_records):
    api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
    api.set_shard_at_rest('dummy', 2, "dest1/test_sharding")

    account_1 = self._prepare_account_data(
        self.db1, 1, xrange(0, num_records))
    account_2 = self._prepare_account_data(
        self.db1, 2, xrange(0, num_records))

    shard_manager = sharder._begin_migration(
        'dummy', 1, "dest2/test_sharding")
    self._modify_data(account_1, account_2, num_records, num_records)
    while not shard_manager.is_finished():
        time.sleep(0.01)

    self._verify_end_state(
        account_1, account_2, self.unwrapped_dummy_1, self.unwrapped_dummy_2)

    # Check that the data for the other account has remained intact and in
    # the same place
    account_2_actual = list(self.unwrapped_dummy_1.find({'account_id': 2}))
    account_2_actual = list(sorted(
        account_2_actual, key=lambda r: r['some_key']))
    self.assertEquals(account_2, account_2_actual)

    # Now migrate back to the source
    print 'Now migrate backwards...'
    shard_manager = sharder._begin_migration(
        'dummy', 1, "dest1/test_sharding")
    self._modify_data(account_1, account_2, num_records * 2, num_records)
    while not shard_manager.is_finished():
        time.sleep(0.01)

    self._verify_end_state(
        account_1, account_2, self.unwrapped_dummy_2, self.unwrapped_dummy_1)
Example 6: test_query
def test_query(self):
    api.create_realm("dummy-realm", "some_field", "dummy_collection")
    api.set_shard_at_rest("dummy-realm", 1, "dest1/some_db")

    expected_metadata = {"shard_key": 1, "location": "dest1/some_db", "realm": "dummy-realm"}

    def _trim_results(docs):
        return [{"shard_key": doc["shard_key"], "location": doc["location"], "realm": doc["realm"]} for doc in docs]

    store = metadata.ShardMetadataStore({"name": "dummy-realm"})
    results = _trim_results(store._query_shards_collection())
    self.assertEquals([expected_metadata], results)

    results = _trim_results(store._query_shards_collection(1))
    self.assertEquals([expected_metadata], results)

    results = _trim_results(store._query_shards_collection(2))
    self.assertEquals([], results)

    store = metadata.ShardMetadataStore({"name": "some-other-realm"})
    results = _trim_results(store._query_shards_collection())
    self.assertEquals([], results)

    results = _trim_results(store._query_shards_collection(1))
    self.assertEquals([], results)
Example 7: test_set_shard_at_rest_bad_location
def test_set_shard_at_rest_bad_location(self):
    ensure_realm_exists('some_realm', 'some_field', 'some_collection')

    with self.assertRaises(Exception) as catcher:
        set_shard_at_rest('some_realm', 1, 'bad-cluster/db')
    self.assertEquals(
        catcher.exception.message,
        'Cluster bad-cluster has not been configured')
Example 8: run
def run(self):
    try:
        blue('* Starting migration')
        api.start_migration(
            self.collection_name, self.shard_key, self.new_location)

        # Copy phase
        blue('* Doing copy')
        oplog_pos = _get_oplog_pos(self.collection_name, self.shard_key)
        _do_copy(self.collection_name, self.shard_key, self.insert_throttle)

        # Sync phase
        blue('* Initial oplog sync')
        start_sync_time = time.time()
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key, metadata.ShardStatus.MIGRATING_SYNC)
        oplog_pos = _sync_from_oplog(
            self.collection_name, self.shard_key, oplog_pos)

        # Ensure that the sync has taken at least as long as our caching time
        # to ensure that all writes will get paused at approximately the same
        # time.
        while time.time() < start_sync_time + api.get_caching_duration():
            time.sleep(0.05)
            oplog_pos = _sync_from_oplog(
                self.collection_name, self.shard_key, oplog_pos)

        # Now all the caching of metadata should be stopped for this shard.
        # We can flip to being paused at destination and wait ~100ms for any
        # pending updates/inserts to be performed. If these are taking longer
        # than 100ms then you are in a bad place and should rethink sharding.
        blue('* Pausing at destination')
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
        time.sleep(0.1)

        blue('* Syncing oplog once more')
        _sync_from_oplog(
            self.collection_name, self.shard_key, oplog_pos)

        # Delete phase
        blue('* Doing deletion')
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.POST_MIGRATION_DELETE)
        _delete_source_data(
            self.collection_name, self.shard_key,
            delete_throttle=self.delete_throttle)
        api.set_shard_at_rest(
            self.collection_name, self.shard_key, self.new_location,
            force=True)

        blue('* Done')
    except:
        self.exception = sys.exc_info()
        raise
Example 9: run
def run(self):
    try:
        # Copy phase
        self.manager.set_phase('copy')
        api.start_migration(
            self.collection_name, self.shard_key, self.new_location)
        oplog_pos = _get_oplog_pos(self.collection_name, self.shard_key)
        _do_copy(self.collection_name, self.shard_key, self.manager)

        # Sync phase
        self.manager.set_phase('sync')
        start_sync_time = time.time()
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key, metadata.ShardStatus.MIGRATING_SYNC)
        oplog_pos = _sync_from_oplog(
            self.collection_name, self.shard_key, oplog_pos)

        # Ensure that the sync has taken at least as long as our caching time
        # to ensure that all writes will get paused at approximately the same
        # time.
        while time.time() < start_sync_time + api.get_caching_duration():
            time.sleep(0.05)
            oplog_pos = _sync_from_oplog(
                self.collection_name, self.shard_key, oplog_pos)

        # Now all the caching of metadata should be stopped for this shard.
        # We can flip to being paused at destination and wait ~100ms for any
        # pending updates/inserts to be performed. If these are taking longer
        # than 100ms then you are in a bad place and should rethink sharding.
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
        time.sleep(0.1)

        # Sync the oplog one final time to catch any writes that were
        # performed during the pause
        _sync_from_oplog(
            self.collection_name, self.shard_key, oplog_pos)

        # Delete phase
        self.manager.set_phase('delete')
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.POST_MIGRATION_DELETE)
        _delete_source_data(
            self.collection_name, self.shard_key, self.manager)
        api.set_shard_at_rest(
            self.collection_name, self.shard_key, self.new_location,
            force=True)

        self.manager.set_phase('complete')
    except:
        close_thread_connections(threading.current_thread())
        self.exception = sys.exc_info()
        raise
Example 10: test_where_is
def test_where_is(self):
    ensure_realm_exists(
        'some_realm', 'some_field', 'some_collection', 'dest1/db')
    set_shard_at_rest('some_realm', 1, 'dest2/db')

    # Specific location
    self.assertEquals('dest2/db', where_is('some_collection', 1))

    # Default location
    self.assertEquals('dest1/db', where_is('some_collection', 2))
Example 11: test_cannot_move_to_same_location
def test_cannot_move_to_same_location(self):
    ensure_realm_exists(
        'some_realm', 'some_field', 'some_collection', 'dest1/db')
    set_shard_at_rest('some_realm', 1, 'dest1/db')

    with self.assertRaises(Exception) as catcher:
        start_migration('some_realm', 1, 'dest1/db')
    self.assertEquals(
        catcher.exception.message, 'Shard is already at dest1/db')
Example 12: test_multishard_find
def test_multishard_find(self):
    api.set_shard_at_rest('dummy', 1, 'dest1/test_sharding')
    api.set_shard_at_rest('dummy', 2, 'dest2/test_sharding')
    doc1 = {'x': 1, 'y': 1}
    doc2 = {'x': 2, 'y': 1}
    self.db1.dummy.insert(doc1)
    self.db2.dummy.insert(doc2)

    c = operations.multishard_find('dummy', {'y': 1})
    results = sorted(list(c), key=lambda d: d['x'])
    self.assertEquals([doc1, doc2], results)
Example 13: test_where_is
def test_where_is(self):
    ensure_realm_exists('some_realm', 'some_field', 'some_collection')
    set_shard_at_rest('some_realm', 1, 'dest2/db')

    # Specific location
    self.assertEquals('dest2/db', where_is('some_collection', 1))

    # Lack of a location
    with self.assertRaises(Exception) as catcher:
        self.assertEquals('dest1/db', where_is('some_collection', 2))
    self.assertEquals(
        catcher.exception.message, 'Shard key 2 not placed for some_realm')
Example 14: test_basic_copy
def test_basic_copy(self):
    api.set_shard_at_rest("dummy", 1, "dest1/test_sharding")
    doc1 = {"x": 1, "y": 1}
    doc1["_id"] = self.db1.dummy.insert(doc1)

    api.start_migration("dummy", 1, "dest2/test_sharding")
    sharder._do_copy("dummy", 1)

    # The data should now be on the second database
    doc2, = self.db2.dummy.find({})
    self.assertEquals(doc1, doc2)
Example 15: test_basic_copy
def test_basic_copy(self):
    api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
    doc1 = {'x': 1, 'y': 1}
    doc1['_id'] = self.db1.dummy.insert(doc1)

    api.start_migration('dummy', 1, "dest2/test_sharding")
    manager = Mock(insert_throttle=None)
    sharder._do_copy('dummy', 1, manager)

    # The data should now be on the second database
    doc2, = self.db2.dummy.find({})
    self.assertEquals(doc1, doc2)