本文整理汇总了Python中tools.debug函数的典型用法代码示例。如果您正苦于以下问题:Python debug函数的具体用法?Python debug怎么用?Python debug使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了debug函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: decommission_test
def decommission_test(self):
    """Decommission a node and verify data stays available and balanced.

    Builds a balanced 4-node ring, writes 30k rows at QUORUM, then
    decommissions node4. Afterwards every key must still be readable at
    QUORUM and the data sizes of the surviving nodes must match the
    expected redistribution.
    """
    cluster = self.cluster
    tokens = cluster.balanced_tokens(4)
    cluster.populate(4, tokens=tokens).start()
    node1, node2, node3, node4 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 2)
    self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    insert_c1c2(session, n=30000, consistency=ConsistencyLevel.QUORUM)
    cluster.flush()

    initial_sizes = [nd.data_size() for nd in cluster.nodelist() if nd.is_running()]
    init_size = initial_sizes[0]
    # All nodes should hold roughly the same amount of data before the change.
    assert_almost_equal(*initial_sizes)

    time.sleep(.5)
    node4.decommission()
    node4.stop()
    cluster.cleanup()
    time.sleep(.5)

    # Check we can get all the keys
    for key in xrange(0, 30000):
        query_c1c2(session, key, ConsistencyLevel.QUORUM)

    final_sizes = [nd.data_size() for nd in cluster.nodelist() if nd.is_running()]
    debug(final_sizes)
    assert_almost_equal(final_sizes[0], final_sizes[1])
    assert_almost_equal((2.0 / 3.0) * final_sizes[0], final_sizes[2])
    assert_almost_equal(final_sizes[2], init_size)
示例2: setCustomFields
def setCustomFields(self, key, updates):
    """Set a list of fields for this issue in Jira.

    The updates parameter is a dictionary where each key is a custom
    field name and each value is the new value to set. Note: this only
    works for fields of type text.
    """
    issue = self.getIssue(key)
    fields = {}

    for fieldname, newvalue in updates.items():
        currentvalue = issue.get('named').get(fieldname)
        if currentvalue and currentvalue == newvalue:
            # Remote value already matches; nothing to change for this field.
            continue
        # Map the human-readable field name back to its internal field key.
        fieldkey = [k for k, v in issue.get('names').iteritems() if v == fieldname][0]
        fields[fieldkey] = newvalue

    if not fields:
        # No fields to update
        debug('No updates required')
        return True

    payload = {'fields': fields}
    resp = self.request('issue/%s' % (str(key)), method='PUT', data=json.dumps(payload))
    if resp['status'] != 204:
        raise JiraException('Issue was not updated: %s' % (str(resp['status'])))
    return True
示例3: resolveMultiple
def resolveMultiple(self, names=None):
    """Return a list of resolved instances for the given names.

    Args:
        names: A list of instance names, or a single name as a string.
            When omitted or empty, falls back to resolve() and returns a
            list containing at most that single instance.

    Returns:
        A list of resolved instances. Names that cannot be resolved are
        skipped (a debug message is emitted for each).

    Raises:
        Exception: If names is neither a list nor a string.
    """
    # Avoid the mutable default argument; None means "no names given".
    if names is None:
        names = []
    if not isinstance(names, list):
        if isinstance(names, str):
            # Bug fix: list('abc') splits a string into ['a', 'b', 'c'];
            # a single name must become a one-element list instead.
            names = [names]
        else:
            raise Exception('Unexpected variable type')
    # Nothing has been passed, we use resolve()
    if len(names) < 1:
        M = self.resolve()
        if M:
            return [M]
        else:
            return []
    # Try to resolve each instance
    result = []
    for name in names:
        M = self.resolve(name=name)
        if M:
            result.append(M)
        else:
            debug('Could not find instance called %s' % name)
    return result
示例4: enable
def enable():
    # Best effort: enable Behat through the CLI utility. Any failure is
    # deliberately swallowed and reported as the default (None, None, None).
    outcome = (None, None, None)
    try:
        debug('Enabling Behat')
        outcome = self.cli('/admin/tool/behat/cli/util.php', args='--enable')
    except:
        pass
    return outcome
示例5: install
def install():
    # Best effort: create the Behat tables through the CLI utility. Any
    # failure is deliberately swallowed; the default triple is returned.
    outcome = (None, None, None)
    try:
        debug('Installing Behat tables')
        outcome = self.cli('/admin/tool/behat/cli/util.php', args='--install', stdout=None, stderr=None)
    except:
        pass
    return outcome
示例6: drop
def drop():
    # Best effort: drop the Behat database through the CLI utility. Any
    # failure is deliberately swallowed; the default triple is returned.
    outcome = (None, None, None)
    try:
        debug('Dropping database')
        outcome = self.cli('/admin/tool/behat/cli/util.php', args='--drop', stdout=None, stderr=None)
    except:
        pass
    return outcome
示例7: export_products
def export_products(self, cr, uid, ids, context=None):
    """Export the product catalog of every sale shop for each referential."""
    if context is None:
        context = {}
    shop_obj = self.pool.get('sale.shop')
    shop_ids = shop_obj.search(cr, uid, [])
    for referential_id in ids:
        for shop in shop_obj.browse(cr, uid, shop_ids, context):
            # Attach the external connection for this referential so the
            # shop-level export can use it.
            context['conn_obj'] = self.external_connection(cr, uid, referential_id, context=context)
            #shop.export_catalog
            tools.debug((cr, uid, shop, context,))
            shop.export_products(cr, uid, shop, context)
    return True
示例8: checkCachedClones
def checkCachedClones(self, stable=True, integration=True):
    """Clone the official repositories into the local cache if missing."""
    stablePath = os.path.join(self.cache, 'moodle.git')
    integrationPath = os.path.join(self.cache, 'integration.git')
    if not os.path.isdir(stablePath) and stable:
        debug('Cloning stable repository into cache...')
        # Clone, then fetch all remotes inside the fresh cache clone.
        process('%s clone %s %s' % (C.get('git'), C.get('remotes.stable'), stablePath))
        process('%s fetch -a' % C.get('git'), stablePath)
    if not os.path.isdir(integrationPath) and integration:
        debug('Cloning integration repository into cache...')
        process('%s clone %s %s' % (C.get('git'), C.get('remotes.integration'), integrationPath))
        process('%s fetch -a' % C.get('git'), integrationPath)
示例9: _sale_shop
def _sale_shop(self, cr, uid, callback, context=None):
    """Invoke *callback* for every Magento shop with auto-import enabled."""
    if context is None:
        context = {}
    shop_proxy = self.pool.get('sale.shop')
    criteria = [('magento_shop', '=', True), ('auto_import', '=', True)]
    shop_ids = shop_proxy.search(cr, uid, criteria, context=context)
    if shop_ids:
        callback(cr, uid, shop_ids, context=context)
    tools.debug(callback)
    tools.debug(shop_ids)
    return True
示例10: crash_during_decommission_test
def crash_during_decommission_test(self):
    """
    If a node crashes whilst another node is being decommissioned,
    upon restarting the crashed node should not have invalid entries
    for the decommissioned node
    @jira_ticket CASSANDRA-10231
    """
    cluster = self.cluster
    cluster.populate(3).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()[0:2]
    t = DecommissionInParallel(node1)
    t.start()
    # Fix: raw string — the pattern contains regex escapes (\s, \.) that
    # are invalid string escape sequences in a non-raw literal (deprecated
    # and a SyntaxWarning/error in modern Python).
    null_status_pattern = re.compile(r".N(?:\s*)127\.0\.0\.1(?:.*)null(?:\s*)rack1")
    # Poll node2's status until the decommissioning node shows a null entry,
    # then crash node2 while the decommission is still in flight.
    while t.is_alive():
        out = self.show_status(node2)
        if null_status_pattern.search(out):
            debug("Matched null status entry")
            break
    debug("Restarting node2")
    node2.stop(gently=False)
    node2.start(wait_for_binary_proto=True, wait_other_notice=False)
    debug("Waiting for decommission to complete")
    t.join()
    self.show_status(node2)
    debug("Sleeping for 30 seconds to allow gossip updates")
    time.sleep(30)
    # After gossip settles, the restarted node must not report the
    # decommissioned node with a null status.
    out = self.show_status(node2)
    self.assertFalse(null_status_pattern.search(out))
示例11: _zoook_sale_shop
def _zoook_sale_shop(self, cr, uid, callback, context=None):
    """
    Sale Shop Schedules
    """
    if context is None:
        context = {}
    domain = [('zoook_shop', '=', True), ('zoook_automatic_export', '=', True)]
    shop_ids = self.pool.get('sale.shop').search(cr, uid, domain, context=context)
    if shop_ids:
        callback(cr, uid, shop_ids, context=context)
    tools.debug(callback)
    tools.debug(shop_ids)
    return True
示例12: index_query_test
def index_query_test(self):
    """
    Check that a secondary index query times out
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})
    jvm_args = ["-Dcassandra.monitoring_check_interval_ms=50",
                "-Dcassandra.test.read_iteration_delay_ms=1500"]  # see above for explanation
    cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=jvm_args)
    node = cluster.nodelist()[0]
    session = self.patient_cql_connection(node)
    self.create_ks(session, 'ks', 1)
    session.execute("""
        CREATE TABLE test3 (
            id int PRIMARY KEY,
            col int,
            val text
        );
    """)
    session.execute("CREATE INDEX ON test3 (col)")
    for row in xrange(500):
        session.execute("INSERT INTO test3 (id, col, val) VALUES ({}, {}, 'foo')".format(row, row // 10))
    mark = node.mark_log()
    # Prepared statement with a fall-through retry policy so the timeout
    # surfaces instead of being retried away.
    statement = session.prepare("SELECT * from test3 WHERE col < ? ALLOW FILTERING")
    statement.consistency_level = ConsistencyLevel.ONE
    statement.retry_policy = FallthroughRetryPolicy()
    assert_unavailable(lambda c: debug(c.execute(statement, [50])), session)
    node.watch_log_for("Some operations timed out", from_mark=mark, timeout=60)
示例13: index_query_test
def index_query_test(self):
    """
    Check that a secondary index query times out

    Inserts 500 rows into an indexed table with an artificial per-iteration
    read delay, issues a filtering query, and asserts both that the client
    sees an unavailable/timeout error and that the server logs the timeout.
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})
    cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.test.read_iteration_delay_ms=100"])  # see above for explanation
    node = cluster.nodelist()[0]
    session = self.patient_cql_connection(node)
    self.create_ks(session, 'ks', 1)
    session.execute("""
        CREATE TABLE test3 (
            id int PRIMARY KEY,
            col int,
            val text
        );
    """)
    session.execute("CREATE INDEX ON test3 (col)")
    for i in xrange(500):
        session.execute("INSERT INTO test3 (id, col, val) VALUES ({}, {}, 'foo')".format(i, i // 10))
    mark = node.mark_log()
    assert_unavailable(lambda c: debug(c.execute("SELECT * from test3 WHERE col < 50 ALLOW FILTERING")), session)
    # Fix: raw string — "\*" is an invalid escape sequence in a non-raw
    # literal (deprecated, SyntaxWarning/error in modern Python); the
    # pattern is a regex, so it must reach the matcher unmangled.
    node.watch_log_for(r"<SELECT \* FROM ks.test3 WHERE col < 50 (.*)> timed out", from_mark=mark, timeout=30)
示例14: materialized_view_test
def materialized_view_test(self):
    """
    Check that a materialized view query times out
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})
    cluster.populate(2)
    node1, node2 = cluster.nodelist()
    node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
    jvm_args = ["-Dcassandra.monitoring_check_interval_ms=50",
                "-Dcassandra.test.read_iteration_delay_ms=1500"]  # see above for explanation
    node2.start(wait_for_binary_proto=True, jvm_args=jvm_args)
    session = self.patient_exclusive_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute("""
        CREATE TABLE test4 (
            id int PRIMARY KEY,
            col int,
            val text
        );
    """)
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test4 "
                     "WHERE col IS NOT NULL AND id IS NOT NULL PRIMARY KEY (col, id)"))
    for row in xrange(50):
        session.execute("INSERT INTO test4 (id, col, val) VALUES ({}, {}, 'foo')".format(row, row // 10))
    mark = node2.mark_log()
    # Query the view with a fall-through retry policy so the timeout is
    # surfaced to the client rather than retried.
    statement = SimpleStatement("SELECT * FROM mv WHERE col = 50", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
    assert_unavailable(lambda c: debug(c.execute(statement)), session)
    node2.watch_log_for("Some operations timed out", from_mark=mark, timeout=60)
示例15: local_query_test
def local_query_test(self):
    """
    Check that a query running on the local coordinator node times out

    Inserts 500 rows with an artificial per-iteration read delay, runs a
    full-table SELECT, and asserts the client sees a timeout and the
    server logs it.
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})
    # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
    # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
    # iteration of non system queries, so that these queries take much longer to complete,
    # see ReadCommand.withStateTracking()
    cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.test.read_iteration_delay_ms=100"])
    node = cluster.nodelist()[0]
    session = self.patient_cql_connection(node)
    self.create_ks(session, 'ks', 1)
    session.execute("""
        CREATE TABLE test1 (
            id int PRIMARY KEY,
            val text
        );
    """)
    for i in xrange(500):
        session.execute("INSERT INTO test1 (id, val) VALUES ({}, 'foo')".format(i))
    mark = node.mark_log()
    assert_unavailable(lambda c: debug(c.execute("SELECT * from test1")), session)
    # Fix: raw string — "\*" is an invalid escape sequence in a non-raw
    # literal (deprecated, SyntaxWarning/error in modern Python); the
    # pattern is a regex, so it must reach the matcher unmangled.
    node.watch_log_for(r"<SELECT \* FROM ks.test1 (.*)> timed out", from_mark=mark, timeout=30)