This article collects typical usage examples of the Python method twisted.python.monkey.MonkeyPatcher.restore. If you have been wondering what exactly MonkeyPatcher.restore does, how to call it, or how it is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the containing class, twisted.python.monkey.MonkeyPatcher.
The following shows 9 code examples of MonkeyPatcher.restore, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
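Before the project-specific examples, here is a minimal sketch of the basic addPatch()/patch()/restore() cycle. The _Settings object and its TIMEOUT attribute are invented for illustration; only the MonkeyPatcher calls themselves reflect the real Twisted API.

from twisted.python.monkey import MonkeyPatcher

class _Settings(object):
    TIMEOUT = 30        # stand-in for a real module-level constant

settings = _Settings()

patcher = MonkeyPatcher()
patcher.addPatch(settings, "TIMEOUT", 1)   # queue the patch; nothing changes yet
patcher.patch()                            # apply every queued patch
assert settings.TIMEOUT == 1
patcher.restore()                          # put every original value back
assert settings.TIMEOUT == 30

restore() puts back the original value of every attribute that patch() replaced, and, as Example 5's test_restoreTwiceIsANoOp shows, calling it again afterwards is harmless.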
Example 1: test_put_verifyProperRemoval
# Required import: from twisted.python.monkey import MonkeyPatcher [as alias]
# Or: from twisted.python.monkey.MonkeyPatcher import restore [as alias]
def test_put_verifyProperRemoval(self):
    # Replace the time function of the datastore module
    # so that we can artificially speed up time
    monkey_patcher = MonkeyPatcher()
    c = clock()
    c.set(0)
    monkey_patcher.addPatch(datastore, "time", c)
    # Replace the peer_timeout to 5 seconds
    monkey_patcher.addPatch(constants, "peer_timeout", 5)
    monkey_patcher.patch()
    # Insert a node and verify it is within the datastore
    m = self.datastore(self.reactor)
    infohash = 5
    expected_peer = ("127.0.0.1", 5151)
    m.put(infohash, expected_peer)
    peers = m.get(infohash)
    # Iterate over a 1 element list
    for peer in peers:
        self.assertEqual(expected_peer, peer)
    self.assertEquals(1, len(peers))
    # Change the time and verify that the cleaning function
    # actually removes the peer
    c.set(5)
    # TODO hackish, shouldn't reach into object
    m._cleanup(infohash, peer)
    peers = m.get(infohash)
    self.assertEqual(0, len(peers))
    monkey_patcher.restore()
Example 2: test_error_logging
# Required import: from twisted.python.monkey import MonkeyPatcher [as alias]
# Or: from twisted.python.monkey.MonkeyPatcher import restore [as alias]
def test_error_logging(self, logger):
    """
    Failures while applying a diff emit a log message containing the full
    diff.
    """
    o1 = DiffTestObjInvariant(
        a=1,
        b=2,
    )
    patcher = MonkeyPatcher()
    patcher.addPatch(
        DiffTestObjInvariant,
        '_perform_invariant_check',
        False
    )
    patcher.patch()
    try:
        o2 = o1.set('b', 1)
    finally:
        patcher.restore()
    diff = create_diff(o1, o2)
    self.assertRaises(
        InvariantException,
        diff.apply,
        o1,
    )
Example 3: TestingBase
# Required import: from twisted.python.monkey import MonkeyPatcher [as alias]
# Or: from twisted.python.monkey.MonkeyPatcher import restore [as alias]
class TestingBase(object):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()
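A common variant of the setUp()/tearDown() pairing in Example 3 is to register restore() as a cleanup right after patching, so the patch is undone even if the rest of setUp() or the test itself fails before tearDown() would run. This is a sketch of that pattern, not code from the project; it assumes a twisted.trial.unittest.TestCase (addCleanup is part of the standard TestCase API), and Clock and rate_limiter are the same project-specific names used in Example 3.

from twisted.python.monkey import MonkeyPatcher
from twisted.trial import unittest

class TestingBaseWithCleanup(unittest.TestCase):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()
        # No tearDown needed: restore() runs even if later setUp()
        # code or the test itself raises.
        self.addCleanup(self.monkey_patcher.restore)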
Example 4: test_put_reannounceResetsTimer
# Required import: from twisted.python.monkey import MonkeyPatcher [as alias]
# Or: from twisted.python.monkey.MonkeyPatcher import restore [as alias]
def test_put_reannounceResetsTimer(self):
    # Replace the time function of the datastore module
    # so that we can artificially speed up time
    monkey_patcher = MonkeyPatcher()
    c = clock()
    c.set(0)
    monkey_patcher.addPatch(datastore, "time", c)
    # Replace the peer_timeout to 5 seconds
    monkey_patcher.addPatch(constants, "peer_timeout", 5)
    monkey_patcher.patch()
    # Insert a node and verify it is within the datastore
    m = self.datastore(self.reactor)
    infohash = 5
    expected_peer = ("127.0.0.1", 5151)
    m.put(infohash, expected_peer)
    peers = m.get(infohash)
    # Iterate over a 1 element list
    self.assertEquals(1, len(peers))
    for peer in peers:
        self.assertEqual(expected_peer, peer)
    # Change the time and reannounce the peer
    # (make sure the cleanup function doesn't
    # remove the peer yet)
    c.set(4)
    m.put(infohash, expected_peer)
    peers = m.get(infohash)
    self.assertEqual(1, len(peers))
    m._cleanup(infohash, expected_peer)
    c.set(8)
    m._cleanup(infohash, expected_peer)
    peers = m.get(infohash)
    self.assertEqual(1, len(peers))
    c.set(9)
    m._cleanup(infohash, expected_peer)
    peers = m.get(infohash)
    self.assertEqual(0, len(peers))
    monkey_patcher.restore()
Example 5: MonkeyPatcherTest
# Required import: from twisted.python.monkey import MonkeyPatcher [as alias]
# Or: from twisted.python.monkey.MonkeyPatcher import restore [as alias]
class MonkeyPatcherTest(unittest.TestCase):
    """
    Tests for L{MonkeyPatcher} monkey-patching class.
    """
    def setUp(self):
        self.testObject = TestObj()
        self.originalObject = TestObj()
        self.monkeyPatcher = MonkeyPatcher()

    def test_empty(self):
        """
        A monkey patcher without patches shouldn't change a thing.
        """
        self.monkeyPatcher.patch()
        # We can't assert that all state is unchanged, but at least we can
        # check our test object.
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.assertEqual(self.originalObject.bar, self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)

    def test_constructWithPatches(self):
        """
        Constructing a L{MonkeyPatcher} with patches should add all of the
        given patches to the patch list.
        """
        patcher = MonkeyPatcher((self.testObject, "foo", "haha"),
                                (self.testObject, "bar", "hehe"))
        patcher.patch()
        self.assertEqual("haha", self.testObject.foo)
        self.assertEqual("hehe", self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)

    def test_patchExisting(self):
        """
        Patching an attribute that exists sets it to the value defined in the
        patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, "haha")

    def test_patchNonExisting(self):
        """
        Patching a non-existing attribute fails with an C{AttributeError}.
        """
        self.monkeyPatcher.addPatch(self.testObject, "nowhere", "blow up please")
        self.assertRaises(AttributeError, self.monkeyPatcher.patch)

    def test_patchAlreadyPatched(self):
        """
        Adding a patch for an object and attribute that already have a patch
        overrides the existing patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "blah")
        self.monkeyPatcher.addPatch(self.testObject, "foo", "BLAH")
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, "BLAH")
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)

    def test_restoreTwiceIsANoOp(self):
        """
        Restoring an already-restored monkey patch is a no-op.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "blah")
        self.monkeyPatcher.patch()
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)

    def test_runWithPatchesDecoration(self):
        """
        runWithPatches should run the given callable, passing in all arguments
        and keyword arguments, and return the return value of the callable.
        """
        log = []

        def f(a, b, c=None):
            log.append((a, b, c))
            return "foo"

        result = self.monkeyPatcher.runWithPatches(f, 1, 2, c=10)
        self.assertEqual("foo", result)
        self.assertEqual([(1, 2, 10)], log)

    def test_repeatedRunWithPatches(self):
        """
        We should be able to call the same function with runWithPatches more
        than once. All patches should apply for each call.
        """
        def f():
            return (self.testObject.foo, self.testObject.bar,
                    self.testObject.baz)

        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(("haha", self.originalObject.bar,
                          self.originalObject.baz), result)
        result = self.monkeyPatcher.runWithPatches(f)
#......... part of the code omitted here .........
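Example 5 is cut off just as it reaches the runWithPatches() tests, so here is a small self-contained sketch of what that helper does: it applies the queued patches, calls the function with the given arguments, and restores the originals on the way out, whether the callable returns or raises. The _Flags object is invented for illustration.

from twisted.python.monkey import MonkeyPatcher

class _Flags(object):
    retries = 3

flags = _Flags()
patcher = MonkeyPatcher((flags, "retries", 0))

def probe():
    # Runs with the patch in effect.
    return flags.retries

assert patcher.runWithPatches(probe) == 0   # patched during the call
assert flags.retries == 3                   # restored afterwards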
Example 6: build
# Required import: from twisted.python.monkey import MonkeyPatcher [as alias]
# Or: from twisted.python.monkey.MonkeyPatcher import restore [as alias]
def build(self, projectName, projectURL, sourceURL, packagePath,
          outputPath):
    """
    Call pydoctor's entry point with options which will generate HTML
    documentation for the specified package's API.

    @type projectName: C{str}
    @param projectName: The name of the package for which to generate
        documentation.
    @type projectURL: C{str}
    @param projectURL: The location (probably an HTTP URL) of the project
        on the web.
    @type sourceURL: C{str}
    @param sourceURL: The location (probably an HTTP URL) of the root of
        the source browser for the project.
    @type packagePath: L{FilePath}
    @param packagePath: The path to the top-level of the package named by
        C{projectName}.
    @type outputPath: L{FilePath}
    @param outputPath: An existing directory to which the generated API
        documentation will be written.
    """
    intersphinxes = []
    for intersphinx in intersphinxURLs:
        intersphinxes.append("--intersphinx")
        intersphinxes.append(intersphinx)

    # Super awful monkeypatch that will selectively use our templates.
    from pydoctor.templatewriter import util
    originalTemplatefile = util.templatefile

    def templatefile(filename):
        if filename in ["summary.html", "index.html", "common.html"]:
            twistedPythonDir = FilePath(__file__).parent()
            templatesDir = twistedPythonDir.child("_pydoctortemplates")
            return templatesDir.child(filename).path
        else:
            return originalTemplatefile(filename)

    monkeyPatch = MonkeyPatcher((util, "templatefile", templatefile))
    monkeyPatch.patch()

    from pydoctor.driver import main

    args = [u"--project-name", projectName,
            u"--project-url", projectURL,
            u"--system-class", u"twisted.python._pydoctor.TwistedSystem",
            u"--project-base-dir", packagePath.parent().path,
            u"--html-viewsource-base", sourceURL,
            u"--add-package", packagePath.path,
            u"--html-output", outputPath.path,
            u"--html-write-function-pages", u"--quiet", u"--make-html",
            ] + intersphinxes
    args = [arg.encode("utf-8") for arg in args]
    main(args)
    monkeyPatch.restore()
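One thing worth noting about Example 6: monkeyPatch.restore() is only reached when main(args) returns normally, so a pydoctor failure would leave util.templatefile patched. A hedged sketch of how the tail of build() could close that hole (this reuses the monkeyPatch, args, and main names defined above and is not the original Twisted code):

    monkeyPatch.patch()
    try:
        main(args)
    finally:
        # Undo the template patch even if pydoctor raises.
        monkeyPatch.restore()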
Example 7: _build_and_test_api
# Required import: from twisted.python.monkey import MonkeyPatcher [as alias]
# Or: from twisted.python.monkey.MonkeyPatcher import restore [as alias]
def _build_and_test_api(self, listening_port):
    """
    Build the CinderBlockDeviceAPI configured to connect to the Mimic
    server at ``listening_port``.

    Patch twisted.web to force the mimic server to drop incoming
    connections.

    And attempt to interact with the disabled API server first and then
    after re-enabling it to show that the API will re-authenticate even
    after an initial failure.
    """
    import twisted.web.http
    patch = MonkeyPatcher()
    patch.addPatch(
        twisted.web.http.HTTPChannel,
        'connectionMade',
        lambda self: self.transport.loseConnection()
    )
    self.addCleanup(patch.restore)

    backend, api_args = backend_and_api_args_from_configuration({
        "backend": "openstack",
        "auth_plugin": "rackspace",
        "region": "ORD",
        "username": "mimic",
        "api_key": "12345",
        "auth_url": "http://127.0.0.1:{}/identity/v2.0".format(
            listening_port.getHost().port
        ),
    })
    # Cause the Mimic server to close incoming connections
    patch.patch()
    api = get_api(
        backend=backend,
        api_args=api_args,
        reactor=object(),
        cluster_id=make_cluster_id(TestTypes.FUNCTIONAL),
    )
    # List volumes with API patched to close incoming connections.
    try:
        result = api.list_volumes()
    except ConnectFailure:
        # Can't use self.assertRaises here because that would call the
        # function in the main thread.
        pass
    else:
        self.fail(
            'ConnectFailure was not raised. '
            'Got {!r} instead.'.format(
                result
            )
        )
    finally:
        # Re-enable the Mimic server.
        # The API operations that follow should succeed.
        patch.restore()

    # List volumes with API re-enabled
    result = api.list_volumes()
    self.assertEqual([], result)

    # Close the connection from the client side so that the mimic server
    # can close down without leaving behind lingering persistent HTTP
    # channels which cause dirty reactor errors.
    # XXX: This is gross. Perhaps we need ``IBlockDeviceAPI.close``
    (api
     .cinder_volume_manager
     ._original
     ._client_v2
     ._cinder_volumes
     .api
     .client
     .session
     .session.close())
Example 8: KRPC_Iterator_TestCase
# Required import: from twisted.python.monkey import MonkeyPatcher [as alias]
# Or: from twisted.python.monkey.MonkeyPatcher import restore [as alias]
class KRPC_Iterator_TestCase(unittest.TestCase):
    # TODO
    #
    # This inheritance and patching pattern is messy, complex,
    # and doesn't make for maintainable code.
    #
    # Refactor it so that KRPC_Sender has a single reactor
    # reference bound within its constructor (at definition time
    # as a default argument). This way, you can simply just pass
    # in a hollow reactor instead of hacking it in
    #
    # What about KRPC_Responder and KRPC_Iterator?
    #  - A pass through argument that floats up through
    #    the constructors
    #
    # TODO
    def setUp(self):
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(krpc_sender, "reactor", HollowReactor())
        self.monkey_patcher.patch()
        self.k_iter = KRPC_Iterator()
        self.k_iter.transport = HollowTransport()
        self.target_id = 5

    def tearDown(self):
        self.monkey_patcher.restore()

    #
    # Find iterate test cases
    #
    def test_find_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
            self.k_iter.find_iterate)

    def test_find_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
            self.k_iter.find_iterate)

    def test_find_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
            self.k_iter.find_iterate)

    def test_find_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
            self.k_iter.find_iterate)

    def test_find_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
            self.k_iter.find_iterate)

    def test_find_iterate_returnsNewNodes(self):
        # deferreds is a (query, deferred) tuple list
        (deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
            self.k_iter.find_iterate)
        num_queries = len(deferreds)
        # Use any nodes as result nodes (even the nodes themselves)
        result_nodes = test_nodes[:num_queries]
        # Set up dummy node_id's
        node_id = 1
        for (query, deferred), node in zip(deferreds, result_nodes):
            response = query.build_response(nodes=[node])
            response._from = node_id
            node_id += 1
            deferred.callback(response)
        expected_nodes = set(result_nodes)
        d.addErrback(self._fail_errback)
        d.addCallback(self._compare_nodes, expected_nodes)
        # Make sure we don't accidentally slip past an
        # uncalled deferred
        self.assertTrue(d.called)

    #
    # Get iterate test cases
    #
    def test_get_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
            self.k_iter.get_iterate)

    def test_get_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
            self.k_iter.get_iterate)

    def test_get_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
            self.k_iter.get_iterate)

    def test_get_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
            self.k_iter.get_iterate)

    def test_get_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
            self.k_iter.get_iterate)

    def test_get_iterate_returnsNewNodesAndPeers(self):
        # deferreds is a (query, deferred) tuple list
        # where each tuple corresponds to one outbound query
        # and deferred result
        #
        # and d is a deferred result of the iter_func
        (deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
#......... part of the code omitted here .........
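The TODO at the top of Example 8 suggests an alternative to patching krpc_sender.reactor: bind the reactor in the constructor as a default argument and let subclasses pass a fake one through. A rough illustration of that idea follows; the class and parameter names here are hypothetical, not the project's actual code.

from twisted.internet import reactor as default_reactor

class KRPC_Sender_Injectable(object):
    def __init__(self, routing_table_class, node_id, _reactor=default_reactor):
        # Tests hand in a HollowReactor-style fake here instead of
        # monkeypatching the krpc_sender module.
        self._reactor = _reactor
        self._routing_table_class = routing_table_class
        self.node_id = node_id

class KRPC_Iterator_Injectable(KRPC_Sender_Injectable):
    def __init__(self, routing_table_class, node_id, _reactor=default_reactor):
        # The "pass-through argument that floats up through the constructors".
        KRPC_Sender_Injectable.__init__(
            self, routing_table_class, node_id, _reactor=_reactor)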
Example 9: RateLimiterPatcherTestCase
# Required import: from twisted.python.monkey import MonkeyPatcher [as alias]
# Or: from twisted.python.monkey.MonkeyPatcher import restore [as alias]
class RateLimiterPatcherTestCase(unittest.TestCase):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()
        self.address = ("127.0.0.1", 55)
        self.query = Query()
        self.query.rpctype = "ping"
        self.query._from = 15
        self.query._transaction_id = 99
        self.packet = krpc_coder.encode(self.query)
        # Patch in hardcoded values for the bandwidth
        # limits so that changing the constants will
        # not affect the usefulness of this test case
        # (The global bandwidth is set to 3 standard ping queries)
        # (The per user bandwidth is set to 1 standard ping query)
        self.monkey_patcher.addPatch(rate_limiter.constants,
                                     "global_bandwidth_rate", 3 * len(self.packet))
        self.monkey_patcher.addPatch(rate_limiter.constants,
                                     "host_bandwidth_rate", 1 * len(self.packet))
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()

    def _patched_sender(self):
        ksender = KRPC_Sender(TreeRoutingTable, 2**50)
        ksender.transport = HollowTransport()
        # Start the protocol to simulate
        # a regular environment
        rate_limited_proto = RateLimiter_Patcher(ksender)
        rate_limited_proto.startProtocol()
        return rate_limited_proto

    def test_inbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our inbound host bandwidth limit

        @see dhtbot.constants.host_bandwidth_rate
        """
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # One packet should be accepted without problems
        rate_limited_proto.datagramReceived(
            krpc_coder.encode(self.query), self.address)
        self.assertEquals(1, counter.count)
        counter.reset()
        # The second packet should be dropped
        rate_limited_proto.datagramReceived(
            krpc_coder.encode(self.query), self.address)
        self.assertEquals(0, counter.count)
        # Reset the rate limiter and the next packet should
        # be accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(
            krpc_coder.encode(self.query), self.address)
        self.assertEquals(1, counter.count)

    def test_inbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our inbound global bandwidth limit

        @see dhtbot.constants.global_bandwidth_rate
        """
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # The first three packets should be accepted without
        # any problems
        rate_limited_proto.datagramReceived(
            krpc_coder.encode(self.query), address1)
        self.assertEquals(1, counter.count)
        rate_limited_proto.datagramReceived(
            krpc_coder.encode(self.query), address2)
        self.assertEquals(2, counter.count)
        rate_limited_proto.datagramReceived(
            krpc_coder.encode(self.query), address3)
        self.assertEquals(3, counter.count)
        # The fourth packet should be dropped
        rate_limited_proto.datagramReceived(
            krpc_coder.encode(self.query), address4)
        self.assertEquals(3, counter.count)
        # Reset the rate limiter and the next packet should be
        # accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(
            krpc_coder.encode(self.query), self.address)
        self.assertEquals(4, counter.count)

    def test_outbound_overflowHostAndReset(self):
        """
#......... part of the code omitted here .........