本文整理汇总了Python中twisted.python.monkey.MonkeyPatcher.addPatch方法的典型用法代码示例。如果您正苦于以下问题:Python MonkeyPatcher.addPatch方法的具体用法?Python MonkeyPatcher.addPatch怎么用?Python MonkeyPatcher.addPatch使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类twisted.python.monkey.MonkeyPatcher的用法示例。
在下文中一共展示了MonkeyPatcher.addPatch方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_put_verifyProperRemoval
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
def test_put_verifyProperRemoval(self):
    """
    A peer stored via ``put`` is removed by the cleanup function once
    its timeout has elapsed.
    """
    # Replace the time function of the datastore module so that
    # we can artificially speed up time.
    monkey_patcher = MonkeyPatcher()
    c = clock()
    c.set(0)
    monkey_patcher.addPatch(datastore, "time", c)
    # Shrink the peer timeout to 5 seconds to keep the test fast.
    monkey_patcher.addPatch(constants, "peer_timeout", 5)
    monkey_patcher.patch()
    # Undo the patches even if an assertion fails, so later tests see
    # the unpatched modules (the original only restored on success).
    try:
        # Insert a peer and verify it is within the datastore.
        m = self.datastore(self.reactor)
        infohash = 5
        expected_peer = ("127.0.0.1", 5151)
        m.put(infohash, expected_peer)
        peers = m.get(infohash)
        # Iterate over a 1 element list.
        for peer in peers:
            self.assertEqual(expected_peer, peer)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(1, len(peers))
        # Advance past the 5 second timeout and verify that the
        # cleanup function actually removes the peer.
        c.set(5)
        # TODO hackish, shouldnt reach into object
        m._cleanup(infohash, peer)
        peers = m.get(infohash)
        self.assertEqual(0, len(peers))
    finally:
        monkey_patcher.restore()
示例2: test_error_logging
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
def test_error_logging(self, logger):
    """
    Failures while applying a diff emit a log message containing the full
    diff.
    """
    original = DiffTestObjInvariant(
        a=1,
        b=2,
    )
    # Temporarily disable the invariant check so an object that
    # violates the invariant can be constructed at all.
    monkey_patcher = MonkeyPatcher()
    monkey_patcher.addPatch(
        DiffTestObjInvariant,
        '_perform_invariant_check',
        False
    )
    monkey_patcher.patch()
    try:
        modified = original.set('b', 1)
    finally:
        monkey_patcher.restore()
    # Applying the diff re-runs the (re-enabled) invariant check, which
    # must fail on the invalid target state.
    diff = create_diff(original, modified)
    self.assertRaises(
        InvariantException,
        diff.apply,
        original,
    )
示例3: TestingBase
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
class TestingBase(object):
    """
    Mixin that installs a deterministic clock into the ``rate_limiter``
    module for the duration of each test and removes it afterwards.
    """
    def setUp(self):
        self.clock = Clock()
        patcher = MonkeyPatcher()
        patcher.addPatch(rate_limiter.time, "time", self.clock)
        patcher.patch()
        # Keep a handle so tearDown can undo the patch.
        self.monkey_patcher = patcher

    def tearDown(self):
        # Put the real time function back.
        self.monkey_patcher.restore()
示例4: test_metadata_service
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
def test_metadata_service(self):
    """
    The instance ID is retrieved from the metadata service if it can't be
    found on the config drive.
    """
    patch = MonkeyPatcher()
    # A compute_instance_id found from the metadata service
    # (the unicode() builtin implies this targets Python 2).
    server_compute_instance_id = unicode(uuid4())
    # Point the API to a config drive label that won't be found.
    configdrive_label = filesystem_label_for_test(self)
    patch.addPatch(
        self.api,
        '_config_drive_label',
        configdrive_label,
    )
    # Set up a fake metadata service and point the API to its endpoint
    listening = webserver_for_test(
        self,
        url_path="/" + "/".join(METADATA_RELATIVE_PATH),
        response_content=json.dumps(
            {"uuid": server_compute_instance_id}
        ),
    )

    def set_metadata_service_endpoint(port):
        # The webserver bound an ephemeral port; record its actual
        # address as the API's metadata endpoint.
        address = port.getHost()
        endpoint = (address.host, address.port)
        patch.addPatch(
            self.api,
            '_metadata_service_endpoint',
            endpoint,
        )
        # Pass the port through for the next callback in the chain.
        return port

    listening.addCallback(set_metadata_service_endpoint)

    # Run compute_instance_id in a separate thread.
    # With the API patched to check the fake metadata sources.
    def start_compute_instance_id(port):
        # Patches are applied only once both fake sources are set up.
        # NOTE(review): nothing restores these patches afterwards —
        # presumably harmless for a throwaway test API; confirm.
        patch.patch()
        return deferToThread(
            self.api.compute_instance_id
        )

    connecting = listening.addCallback(start_compute_instance_id)

    def check(result):
        # The config drive lookup failed, so the metadata service's
        # UUID must be returned.
        self.assertEqual(server_compute_instance_id, result)

    checking = connecting.addCallback(check)
    # Return the deferred so trial waits for the whole chain.
    return checking
示例5: test_unknown_instance_id
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
def test_unknown_instance_id(self):
    """
    ``UnknownInstanceID`` is raised if all node UUID lookup mechanisms
    fail.
    """
    monkey_patcher = MonkeyPatcher()
    # A config drive label that does not exist, so mounting fails.
    monkey_patcher.addPatch(
        self.api, '_config_drive_label', filesystem_label_for_test(self)
    )
    # An unreachable metadata service endpoint, so TCP connections fail.
    monkey_patcher.addPatch(
        self.api, '_metadata_service_endpoint', find_free_port()
    )
    self.addCleanup(monkey_patcher.restore)
    monkey_patcher.patch()
    # With both mechanisms broken, the lookup must raise.
    self.assertRaises(UnknownInstanceID, self.api.compute_instance_id)
示例6: test_put_reannounceResetsTimer
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
def test_put_reannounceResetsTimer(self):
    """
    Re-announcing a peer before its timeout expires resets the
    expiration timer, so the peer survives past the original deadline.
    """
    # Replace the time function of the datastore module so that
    # we can artificially speed up time.
    monkey_patcher = MonkeyPatcher()
    c = clock()
    c.set(0)
    monkey_patcher.addPatch(datastore, "time", c)
    # Shrink the peer timeout to 5 seconds to keep the test fast.
    monkey_patcher.addPatch(constants, "peer_timeout", 5)
    monkey_patcher.patch()
    # Undo the patches even if an assertion fails, so later tests see
    # the unpatched modules (the original only restored on success).
    try:
        # Insert a peer and verify it is within the datastore.
        m = self.datastore(self.reactor)
        infohash = 5
        expected_peer = ("127.0.0.1", 5151)
        m.put(infohash, expected_peer)
        peers = m.get(infohash)
        # Iterate over a 1 element list.
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(1, len(peers))
        for peer in peers:
            self.assertEqual(expected_peer, peer)
        # Advance the time and reannounce the peer (make sure the
        # cleanup function doesn't remove the peer yet).
        c.set(4)
        m.put(infohash, expected_peer)
        peers = m.get(infohash)
        self.assertEqual(1, len(peers))
        # Cleanups at t=4 and t=8 must not remove the peer: the
        # reannouncement at t=4 pushed the deadline out to t=9.
        m._cleanup(infohash, expected_peer)
        c.set(8)
        m._cleanup(infohash, expected_peer)
        peers = m.get(infohash)
        self.assertEqual(1, len(peers))
        # At t=9 the refreshed timeout has elapsed; cleanup removes it.
        c.set(9)
        m._cleanup(infohash, expected_peer)
        peers = m.get(infohash)
        self.assertEqual(0, len(peers))
    finally:
        monkey_patcher.restore()
示例7: MonkeyPatcherTest
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
class MonkeyPatcherTest(unittest.TestCase):
    """
    Tests for L{MonkeyPatcher} monkey-patching class.
    """
    def setUp(self):
        # Two identical objects: one to patch and one kept pristine so
        # tests can compare restored state against the original.
        self.testObject = TestObj()
        self.originalObject = TestObj()
        self.monkeyPatcher = MonkeyPatcher()

    def test_empty(self):
        """
        A monkey patcher without patches shouldn't change a thing.
        """
        self.monkeyPatcher.patch()
        # We can't assert that all state is unchanged, but at least we can
        # check our test object.
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.assertEqual(self.originalObject.bar, self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)

    def test_constructWithPatches(self):
        """
        Constructing a L{MonkeyPatcher} with patches should add all of the
        given patches to the patch list.
        """
        patcher = MonkeyPatcher((self.testObject, "foo", "haha"), (self.testObject, "bar", "hehe"))
        patcher.patch()
        self.assertEqual("haha", self.testObject.foo)
        self.assertEqual("hehe", self.testObject.bar)
        # An attribute without a patch is left untouched.
        self.assertEqual(self.originalObject.baz, self.testObject.baz)

    def test_patchExisting(self):
        """
        Patching an attribute that exists sets it to the value defined in the
        patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, "haha")

    def test_patchNonExisting(self):
        """
        Patching a non-existing attribute fails with an C{AttributeError}.
        """
        self.monkeyPatcher.addPatch(self.testObject, "nowhere", "blow up please")
        self.assertRaises(AttributeError, self.monkeyPatcher.patch)

    def test_patchAlreadyPatched(self):
        """
        Adding a patch for an object and attribute that already have a patch
        overrides the existing patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "blah")
        self.monkeyPatcher.addPatch(self.testObject, "foo", "BLAH")
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, "BLAH")
        # A single restore undoes both stacked patches.
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)

    def test_restoreTwiceIsANoOp(self):
        """
        Restoring an already-restored monkey patch is a no-op.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "blah")
        self.monkeyPatcher.patch()
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)

    def test_runWithPatchesDecoration(self):
        """
        runWithPatches should run the given callable, passing in all arguments
        and keyword arguments, and return the return value of the callable.
        """
        log = []

        def f(a, b, c=None):
            log.append((a, b, c))
            return "foo"

        result = self.monkeyPatcher.runWithPatches(f, 1, 2, c=10)
        self.assertEqual("foo", result)
        self.assertEqual([(1, 2, 10)], log)

    def test_repeatedRunWithPatches(self):
        """
        We should be able to call the same function with runWithPatches more
        than once. All patches should apply for each call.
        """
        def f():
            return (self.testObject.foo, self.testObject.bar, self.testObject.baz)

        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(("haha", self.originalObject.bar, self.originalObject.baz), result)
        result = self.monkeyPatcher.runWithPatches(f)
        # NOTE(review): the source listing is truncated here; the
        # assertions on the second ``result`` are not visible.
        # ......... remainder of this example omitted .........
示例8: _build_and_test_api
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
def _build_and_test_api(self, listening_port):
    """
    Build the CinderBlockDeviceAPI configured to connect to the Mimic
    server at ``listening_port``.
    Patch twisted.web to force the mimic server to drop incoming
    connections.
    And attempt to interact with the disabled API server first and then
    after re-enabling it to show that the API will re-authenticate even
    after an initial failure.
    """
    # Imported locally so the patch below targets the exact module
    # object twisted.web uses at runtime.
    import twisted.web.http
    patch = MonkeyPatcher()
    # Dropping every connection the moment it is made simulates a
    # disabled / unreachable API server.
    patch.addPatch(
        twisted.web.http.HTTPChannel,
        'connectionMade',
        lambda self: self.transport.loseConnection()
    )
    self.addCleanup(patch.restore)
    backend, api_args = backend_and_api_args_from_configuration({
        "backend": "openstack",
        "auth_plugin": "rackspace",
        "region": "ORD",
        "username": "mimic",
        "api_key": "12345",
        "auth_url": "http://127.0.0.1:{}/identity/v2.0".format(
            listening_port.getHost().port
        ),
    })
    # Cause the Mimic server to close incoming connections
    patch.patch()
    api = get_api(
        backend=backend,
        api_args=api_args,
        reactor=object(),
        cluster_id=make_cluster_id(TestTypes.FUNCTIONAL),
    )
    # List volumes with API patched to close incoming connections.
    try:
        result = api.list_volumes()
    except ConnectFailure:
        # Can't use self.assertRaises here because that would call the
        # function in the main thread.
        pass
    else:
        self.fail(
            'ConnectFailure was not raised. '
            'Got {!r} instead.'.format(
                result
            )
        )
    finally:
        # Re-enable the Mimic server.
        # The API operations that follow should succeed.
        patch.restore()
    # List volumes with API re-enabled; a fresh authentication must
    # succeed despite the earlier failure.
    result = api.list_volumes()
    self.assertEqual([], result)
    # Close the connection from the client side so that the mimic server
    # can close down without leaving behind lingering persistent HTTP
    # channels which cause dirty reactor errors.
    # XXX: This is gross. Perhaps we need ``IBlockDeviceAPI.close``
    (api
        .cinder_volume_manager
        ._original
        ._client_v2
        ._cinder_volumes
        .api
        .client
        .session
        .session.close())
示例9: test_config_drive
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
def test_config_drive(self):
    """
    The instance ID is retrieved from the config drive in preference to the
    metadata server.
    """
    patch = MonkeyPatcher()
    # A compute_instance_id found on config drive
    drive_compute_instance_id = unicode(uuid4())
    # A compute_instance_id found from the metadata service
    server_compute_instance_id = unicode(uuid4())
    # Set up a fake config drive and point the API to its label
    configdrive_label = filesystem_label_for_test(self)
    device = formatted_loopback_device_for_test(
        self,
        label=configdrive_label,
    )
    # Write the metadata file onto the fake config drive.
    with temporary_mount(device.device) as mountpoint:
        metadata_file = mountpoint.descendant(
            METADATA_RELATIVE_PATH
        )
        metadata_file.parent().makedirs()
        metadata_file.setContent(
            json.dumps({
                "uuid": drive_compute_instance_id
            })
        )
    # NOTE(review): the flattened listing loses indentation, so whether
    # the statements below sit inside or after the ``with`` block is
    # ambiguous; placing them after the unmount lets the API mount the
    # drive itself by label — confirm against the original source.
    patch.addPatch(
        self.api,
        '_config_drive_label',
        configdrive_label,
    )
    # Set up a fake metadata service and point the API to its endpoint
    listening = webserver_for_test(
        self,
        url_path="/" + "/".join(METADATA_RELATIVE_PATH),
        response_content=json.dumps(
            {"uuid": server_compute_instance_id}
        ),
    )

    def set_metadata_service_endpoint(port):
        # Record the webserver's ephemeral address as the API's
        # metadata endpoint.
        address = port.getHost()
        endpoint = (address.host, address.port)
        patch.addPatch(
            self.api,
            '_metadata_service_endpoint',
            endpoint,
        )
        return port

    listening.addCallback(set_metadata_service_endpoint)

    # Run compute_instance_id in a separate thread.
    # With the API patched to check the fake metadata sources.
    def start_compute_instance_id(port):
        patch.patch()
        return deferToThread(
            self.api.compute_instance_id
        )

    connecting = listening.addCallback(start_compute_instance_id)

    def check(result):
        # The config drive value wins over the metadata service value.
        self.assertEqual(drive_compute_instance_id, result)

    checking = connecting.addCallback(check)
    # Return the deferred so trial waits for the whole chain.
    return checking
示例10: test_no_retry_authentication
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
def test_no_retry_authentication(self):
    """
    The API object returned by ``cinder_from_configuration`` will retry
    authentication even when initial authentication attempts fail.
    """
    # NOTE(review): the method name says "no_retry" but the docstring
    # and the assertions below exercise a *successful* retry after an
    # initial connection failure — the name looks stale; confirm.
    import twisted.web.http
    # Disable persistent HTTP connections so every request opens a
    # fresh channel (and so hits the connectionMade patch below).
    self.patch(
        twisted.web.http.HTTPChannel,
        'checkPersistence',
        lambda self, request, version: False
    )
    patch = MonkeyPatcher()
    # When applied, this drops every incoming connection immediately,
    # simulating an unreachable auth endpoint.
    patch.addPatch(
        twisted.web.http.HTTPChannel,
        'connectionMade',
        lambda self: self.transport.loseConnection()
    )
    self.addCleanup(patch.restore)
    mimic_starting = mimic_for_test(test_case=self)

    def build_api(listening_port):
        backend, api_args = backend_and_api_args_from_configuration({
            "backend": "openstack",
            "auth_plugin": "rackspace",
            "region": "ORD",
            "username": "mimic",
            "api_key": "12345",
            "auth_url": "http://127.0.0.1:{}/identity/v2.0".format(
                listening_port.getHost().port
            ),
        })
        # Connections are broken while the API object is constructed,
        # so any authentication attempted here fails.
        patch.patch()
        api = get_api(
            backend=backend,
            api_args=api_args,
            reactor=object(),
            cluster_id=make_cluster_id(TestTypes.FUNCTIONAL),
        )
        patch.restore()
        return api

    mimic_started = mimic_starting.addCallback(build_api)

    def list_volumes(api, force_connection_failure=False):
        # Optionally break connections for the duration of one call.
        # NOTE(review): indentation is lost in this listing; restoring
        # unconditionally is safe either way because restore() on an
        # unapplied MonkeyPatcher is a no-op.
        if force_connection_failure:
            patch.patch()
        try:
            return api.list_volumes()
        finally:
            patch.restore()

    def check_failing_connection(api):
        # Run in a thread because list_volumes blocks.
        d = deferToThread(
            lambda api: list_volumes(api, force_connection_failure=True),
            api,
        )
        d = self.assertFailure(d, ConnectFailure)
        # return the api for further testing.
        d = d.addCallback(
            lambda failure_instance: api
        )
        return d

    listing_volumes1 = mimic_started.addCallback(check_failing_connection)

    def check_successful_connection(api):
        # With connections healthy again, the API re-authenticates and
        # the call succeeds.
        d = deferToThread(
            lambda api: list_volumes(api, force_connection_failure=False),
            api,
        )
        d = d.addCallback(
            lambda result: self.assertEqual([], result)
        )
        return d

    finishing = listing_volumes1.addCallback(check_successful_connection)
    return finishing
示例11: KRPC_Iterator_TestCase
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
class KRPC_Iterator_TestCase(unittest.TestCase):
    """
    Exercises KRPC_Iterator's find_iterate and get_iterate against a
    hollow (non-networking) reactor and transport.
    """
    # TODO
    #
    # This inheritance and patching pattern is messy, complex,
    # and doesn't make for maintainable code.
    #
    # Refactor it so that KRPC_Sender has a single reactor
    # reference bound within its constructor (at definition time
    # as a default argument). This way, you can simply just pass
    # in a hollow reactor instead of hacking it in
    #
    # What about KRPC_Responder and KRPC_Iterator?
    #     - A pass through argument that floats up through
    #       the constructors
    # TODO
    def setUp(self):
        # Swap the real reactor out of krpc_sender so no actual network
        # or timer activity occurs during the tests.
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(krpc_sender, "reactor", HollowReactor())
        self.monkey_patcher.patch()
        self.k_iter = KRPC_Iterator()
        self.k_iter.transport = HollowTransport()
        self.target_id = 5

    def tearDown(self):
        self.monkey_patcher.restore()

    #
    # Find iterate test cases
    #
    # NOTE(review): the _check_* helpers called below are defined in a
    # part of the file not shown in this listing.
    def test_find_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
            self.k_iter.find_iterate)

    def test_find_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
            self.k_iter.find_iterate)

    def test_find_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
            self.k_iter.find_iterate)

    def test_find_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
            self.k_iter.find_iterate)

    def test_find_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
            self.k_iter.find_iterate)

    def test_find_iterate_returnsNewNodes(self):
        # deferreds is a (query, deferred) tuple list
        (deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
            self.k_iter.find_iterate)
        num_queries = len(deferreds)
        # Use any nodes as result nodes (even the nodes themselves)
        result_nodes = test_nodes[:num_queries]
        # Set up dummy node_id's
        node_id = 1
        # Fire each outstanding query's deferred with a response
        # carrying one result node.
        for (query, deferred), node in zip(deferreds, result_nodes):
            response = query.build_response(nodes=[node])
            response._from = node_id
            node_id += 1
            deferred.callback(response)
        expected_nodes = set(result_nodes)
        d.addErrback(self._fail_errback)
        d.addCallback(self._compare_nodes, expected_nodes)
        # Make sure we don't accidentally slip past an
        # uncalled deferred
        self.assertTrue(d.called)

    #
    # Get iterate test cases
    #
    def test_get_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
            self.k_iter.get_iterate)

    def test_get_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
            self.k_iter.get_iterate)

    def test_get_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
            self.k_iter.get_iterate)

    def test_get_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
            self.k_iter.get_iterate)

    def test_get_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
            self.k_iter.get_iterate)
def test_get_iterate_returnsNewNodesAndPeers(self):
# deferreds is a (query, deferred) tuple list
# where each tuple corresponds to one outbound query
# and deferred result
#
# and d is a deferred result of the iter_func
(deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
# ......... remainder of this example omitted .........
示例12: RateLimiterPatcherTestCase
# 需要导入模块: from twisted.python.monkey import MonkeyPatcher [as 别名]
# 或者: from twisted.python.monkey.MonkeyPatcher import addPatch [as 别名]
class RateLimiterPatcherTestCase(unittest.TestCase):
    """
    Verifies that RateLimiter_Patcher drops inbound datagrams which
    exceed the per-host or global bandwidth limits, and accepts them
    again once the rate limiter's time window resets.
    """
    def setUp(self):
        # Install a controllable clock so the limiter's time-based
        # reset can be driven deterministically.
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()
        self.address = ("127.0.0.1", 55)
        self.query = Query()
        self.query.rpctype = "ping"
        self.query._from = 15
        self.query._transaction_id = 99
        self.packet = krpc_coder.encode(self.query)
        # Patch in hardcoded values for the bandwidth limits so that
        # changing the constants will not affect the usefulness of this
        # test case.
        # (The global bandwidth is set to 3 standard ping queries)
        # (The per host bandwidth is set to 1 standard ping query)
        self.monkey_patcher.addPatch(rate_limiter.constants,
                "global_bandwidth_rate", 3 * len(self.packet))
        self.monkey_patcher.addPatch(rate_limiter.constants,
                "host_bandwidth_rate", 1 * len(self.packet))
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()

    def _patched_sender(self):
        # Build a rate-limited KRPC_Sender with a hollow transport so
        # nothing touches the network.
        ksender = KRPC_Sender(TreeRoutingTable, 2**50)
        ksender.transport = HollowTransport()
        # Start the protocol to simulate a regular environment.
        rate_limited_proto = RateLimiter_Patcher(ksender)
        rate_limited_proto.startProtocol()
        return rate_limited_proto

    def test_inbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our inbound host bandwidth limit
        @see dhtbot.constants.host_bandwidth_rate
        """
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # One packet should be accepted without problems.
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(1, counter.count)
        counter.reset()
        # The second packet should be dropped.
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEqual(0, counter.count)
        # Reset the rate limiter and the next packet should
        # be accepted.
        self.clock.set(1)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEqual(1, counter.count)

    def test_inbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our inbound global bandwidth limit
        @see dhtbot.constants.host_global_rate
        """
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # The first three packets (one per distinct host, each within
        # the per-host limit) should be accepted without any problems.
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address1)
        self.assertEqual(1, counter.count)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address2)
        self.assertEqual(2, counter.count)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address3)
        self.assertEqual(3, counter.count)
        # The fourth packet exceeds the global limit and is dropped.
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address4)
        self.assertEqual(3, counter.count)
        # Reset the rate limiter and the next packet should be
        # accepted.
        self.clock.set(1)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEqual(4, counter.count)
def test_outbound_overflowHostAndReset(self):
"""
# ......... remainder of this example omitted .........