This article collects typical usage examples of the Python function nose.tools.assert_greater. If you have been wondering what exactly assert_greater does, how to use it, or where to find real-world examples of it, the curated code samples below should help.
The 15 assert_greater code examples shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
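Before the project-specific examples, here is a minimal, self-contained sketch of the basic calling convention (an illustrative snippet, not taken from any of the projects below; the test name and variables are made up): assert_greater(first, second) passes silently when first > second and raises an AssertionError with a readable message otherwise.
from nose.tools import assert_greater

def test_assert_greater_basics():
    # passes: 5 > 3
    assert_greater(5, 3)
    # also works for floats, e.g. checking a measured rate against a lower bound
    measured_kbps = 412.7
    assert_greater(measured_kbps, 0.8 * 500)
    # equal or smaller values fail, e.g.
    # assert_greater(3, 3)  -> AssertionError: 3 not greater than 3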
Example 1: test_gmlan
def test_gmlan():
    p = connect_wo_esp()
    if p.legacy:
        return
    # enable output mode
    p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
    # enable CAN loopback mode
    p.set_can_loopback(True)
    p.set_can_speed_kbps(1, SPEED_NORMAL)
    p.set_can_speed_kbps(2, SPEED_NORMAL)
    p.set_can_speed_kbps(3, SPEED_GMLAN)
    # set gmlan on CAN2
    for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3, Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
        p.set_gmlan(bus)
        comp_kbps_gmlan = time_many_sends(p, 3)
        assert_greater(comp_kbps_gmlan, 0.8 * SPEED_GMLAN)
        assert_less(comp_kbps_gmlan, 1.0 * SPEED_GMLAN)

        p.set_gmlan(None)
        comp_kbps_normal = time_many_sends(p, bus)
        assert_greater(comp_kbps_normal, 0.8 * SPEED_NORMAL)
        assert_less(comp_kbps_normal, 1.0 * SPEED_NORMAL)

        print("%d: %.2f kbps vs %.2f kbps" % (bus, comp_kbps_gmlan, comp_kbps_normal))
Example 2: test_tokens
def test_tokens():
    # Let's test on some Chinese text that has unusual combinations of
    # syllables, because it is about an American vice-president.
    #
    # (He was the Chinese Wikipedia's featured article of the day when I
    # wrote this test.)
    hobart = '加勒特·霍巴特'  # Garret Hobart, or "jiā lè tè huò bā tè".
    # He was the sixth American vice president to die in office.
    fact_simplified = '他是历史上第六位在任期内去世的美国副总统。'
    fact_traditional = '他是歷史上第六位在任期內去世的美國副總統。'

    # His name breaks into five pieces, with the only piece staying together
    # being the one that means 'Bart'. The dot is not included as a token.
    eq_(
        tokenize(hobart, 'zh'),
        ['加', '勒', '特', '霍', '巴特']
    )

    eq_(
        tokenize(fact_simplified, 'zh'),
        [
            # he / is / in history / #6 / counter for people
            '他', '是', '历史上', '第六', '位',
            # during / term of office / in / die
            '在', '任期', '内', '去世',
            # of / U.S. / deputy / president
            '的', '美国', '副', '总统'
        ]
    )

    # You match the same tokens if you look it up in Traditional Chinese.
    eq_(tokenize(fact_simplified, 'zh'), tokenize(fact_traditional, 'zh'))
    assert_greater(word_frequency(fact_traditional, 'zh'), 0)
Example 3: _check_marginal_samples_match_scores
def _check_marginal_samples_match_scores(server, row, fi):
    row = loom.query.protobuf_to_data_row(row.diff)
    row[fi] = None
    to_sample = [i == fi for i in range(len(row))]
    samples = server.sample(to_sample, row, SAMPLE_COUNT)
    val = samples[0][fi]
    base_score = server.score(row)
    if isinstance(val, bool) or isinstance(val, int):
        probs_dict = {}
        samples = [sample[fi] for sample in samples]
        for sample in set(samples):
            row[fi] = sample
            probs_dict[sample] = numpy.exp(
                server.score(row) - base_score)
        if len(probs_dict) == 1:
            assert_almost_equal(probs_dict[sample], 1., places=SCORE_PLACES)
            return
        if min(probs_dict.values()) < MIN_CATEGORICAL_PROB:
            return
        gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
    elif isinstance(val, float):
        probs = numpy.exp([
            server.score(sample) - base_score
            for sample in samples
        ])
        samples = [sample[fi] for sample in samples]
        gof = density_goodness_of_fit(samples, probs, plot=True)
    assert_greater(gof, MIN_GOODNESS_OF_FIT)
Example 4: test_get_metro_artist_chart
def test_get_metro_artist_chart(self):
    """ Testing Geo get Metro artist """
    metro = "madrid"
    country = "spain"
    chart = self.geo.get_metro_artist_chart(metro=metro, country=country)
    self.utils.assert_response_content(chart)
    assert_greater(len(chart["topartists"]["artist"]), 5)
Example 5: max_norm_arg
def max_norm_arg(arg):
    arg = float(arg)
    if arg < 0.0:
        return numpy.inf
    else:
        assert_greater(arg, 0.0)
        return arg
Example 6: _test_suspender
def _test_suspender(suspender_class, sc_args, start_val, fail_val, resume_val, wait_time):
    if sys.platform == "darwin":
        # OSX event loop is different; resolve this later
        raise KnownFailureTest()
    my_suspender = suspender_class(RE, "BSTEST:VAL", *sc_args, sleep=wait_time)
    print(my_suspender._lock)
    pv = epics.PV("BSTEST:VAL")
    putter = partial(pv.put, wait=True)
    # make sure we start at good value!
    putter(start_val)
    # dumb scan
    scan = [Msg("checkpoint"), Msg("sleep", None, 0.2)]
    # paranoid
    assert_equal(RE.state, "idle")
    start = ttime.time()
    # queue up fail and resume conditions
    loop.call_later(0.1, putter, fail_val)
    loop.call_later(1, putter, resume_val)
    # start the scan
    RE(scan)
    stop = ttime.time()
    # paranoid clean up of pv call back
    my_suspender._pv.disconnect()
    # assert we waited at least 2 seconds + the settle time
    print(stop - start)
    assert_greater(stop - start, 1 + wait_time + 0.2)
Example 7: test_def_rxtr_req_sample
def test_def_rxtr_req_sample():
    s = RandomRequestPoint()
    assert_equal(1, s.n_commods.sample())
    assert_equal(1, s.n_request.sample())
    assert_equal(1, s.assem_per_req.sample())
    assert_false(s.assem_multi_commod.sample())
    assert_equal(0, s.req_multi_commods.sample())
    assert_false(s.exclusive.sample())
    assert_equal(0, s.n_req_constr.sample())
    assert_equal(1, s.n_supply.sample())
    assert_equal(0, s.sup_multi.sample())
    assert_equal(0, s.sup_multi_commods.sample())
    assert_equal(1, s.n_sup_constr.sample())
    assert_equal(1, s.sup_constr_val.sample())
    assert_true(s.connection.sample())
    s1 = RandomRequestPoint()
    assert_equal(s1, s)
    constr_avg = 0
    pref_avg = 0
    n = 5000
    for i in range(n):
        constr = s.constr_coeff.sample()
        constr_avg += constr
        assert_greater(constr, 0)
        assert_less_equal(constr, 2)
        pref = s.pref_coeff.sample()
        pref_avg += pref
        assert_greater(pref, 0)
        assert_less_equal(pref, 1)
    assert_almost_equal(1.0, constr_avg / n, places=1)
    assert_almost_equal(0.5, pref_avg / n, places=1)
Example 8: test_userpass_success
def test_userpass_success(self):
    """AUTHENTICATION (REST): Username and password (correct credentials)."""
    mw = []
    headers = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
    r = TestApp(app.wsgifunc(*mw)).get('/userpass', headers=headers, expect_errors=True)
    assert_equal(r.status, 200)
    assert_greater(len(r.header('X-Rucio-Auth-Token')), 32)
Example 9: test_find_repetitive_in_range
def test_find_repetitive_in_range():
    pos = 0
    neg = 0
    # some should be positive, others negative
    for txid in known_juncs:
        expected = txid in cross_hash_seqs
        if expected == True:
            pos += 1
        else:
            neg += 1
        my_query_juncs = query_juncs.get(txid, [])
        for query_junc in my_query_juncs:
            minus_range, plus_range = find_match_range(query_junc, seqs, 20)
            yield check_find_repetitive_in_range, query_junc, minus_range, plus_range, expected

    # all negative
    for txid in unmatched_query_juncs:
        my_query_juncs = unmatched_query_juncs.get(txid, [])
        for query_junc in my_query_juncs:
            minus_range, plus_range = find_match_range(query_junc, seqs, 20)
            yield check_find_repetitive_in_range, query_junc, minus_range, plus_range, False

    # make sure we found a bunch of each type
    assert_greater(pos, 0)
    assert_greater(neg, 0)
Example 10: test_create_tiids_from_aliases
def test_create_tiids_from_aliases(self):
    aliases = [('url', 'http://starbucks.com'), ('url', 'http://www.plosmedicine.org/article/info:doi/10.1371/journal.pmed.0020124')]
    response = item_module.create_tiids_from_aliases(aliases, self.r)
    print(response)
    # expect at least one tiid to be created (lower bound of 0 assumed here)
    assert_greater(len(response.keys()), 0)
Example 11: test_switch_to_ad3
def test_switch_to_ad3():
    # test if switching between qpbo and ad3 works
    if not get_installed(['qpbo']) or not get_installed(['ad3']):
        return
    X, Y = toy.generate_blocks_multinomial(n_samples=5, noise=1.5,
                                           seed=0)
    crf = GridCRF(n_states=3, inference_method='qpbo')
    ssvm = NSlackSSVM(crf, max_iter=10000)
    ssvm_with_switch = NSlackSSVM(crf, max_iter=10000, switch_to=('ad3'))
    ssvm.fit(X, Y)
    ssvm_with_switch.fit(X, Y)
    assert_equal(ssvm_with_switch.model.inference_method, 'ad3')
    # we check that the dual is higher with ad3 inference,
    # as it might use the relaxation; that is pretty much guaranteed
    assert_greater(ssvm_with_switch.objective_curve_[-1],
                   ssvm.objective_curve_[-1])
    print(ssvm_with_switch.objective_curve_[-1], ssvm.objective_curve_[-1])

    # test that convergence also results in switch
    ssvm_with_switch = NSlackSSVM(crf, max_iter=10000, switch_to=('ad3'),
                                  tol=10)
    ssvm_with_switch.fit(X, Y)
    assert_equal(ssvm_with_switch.model.inference_method, 'ad3')
Example 12: test_create_missing_tiids_from_aliases
def test_create_missing_tiids_from_aliases(self):
    aliases_tiids_map = {('url', 'http://starbucks.com'): None, ('url', 'http://www.plosmedicine.org/article/info:doi/10.1371/journal.pmed.0020124'): u'test'}
    response = item_module.create_missing_tiids_from_aliases(aliases_tiids_map, self.r)
    print(response)
    assert_greater(len(aliases_tiids_map[('url', 'http://starbucks.com')]), 10)
Example 13: test_standard_svm_blobs_2d_class_weight
def test_standard_svm_blobs_2d_class_weight():
    # no edges, reduce to crammer-singer svm
    X, Y = make_blobs(n_samples=210, centers=3, random_state=1, cluster_std=3,
                      shuffle=False)
    X = np.hstack([X, np.ones((X.shape[0], 1))])
    X, Y = X[:170], Y[:170]

    X_graphs = [(x[np.newaxis, :], np.empty((0, 2), dtype=np.int)) for x in X]

    pbl = GraphCRF(n_features=3, n_states=3, inference_method='unary')
    svm = OneSlackSSVM(pbl, check_constraints=False, C=1000)
    svm.fit(X_graphs, Y[:, np.newaxis])

    weights = 1. / np.bincount(Y)
    weights *= len(weights) / np.sum(weights)
    pbl_class_weight = GraphCRF(n_features=3, n_states=3, class_weight=weights,
                                inference_method='unary')
    svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10,
                                    check_constraints=False,
                                    break_on_bad=False)
    svm_class_weight.fit(X_graphs, Y[:, np.newaxis])

    assert_greater(f1_score(Y, np.hstack(svm_class_weight.predict(X_graphs))),
                   f1_score(Y, np.hstack(svm.predict(X_graphs))))
Example 14: test_gmlan_bad_toggle
def test_gmlan_bad_toggle():
    p = connect_wo_esp()
    if p.legacy:
        return
    # enable output mode
    p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
    # enable CAN loopback mode
    p.set_can_loopback(True)

    # GMLAN_CAN2
    for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
        p.set_gmlan(bus)
        comp_kbps_gmlan = time_many_sends(p, 3)
        assert_greater(comp_kbps_gmlan, 0.6 * SPEED_GMLAN)
        assert_less(comp_kbps_gmlan, 1.0 * SPEED_GMLAN)

    # normal
    for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
        p.set_gmlan(None)
        comp_kbps_normal = time_many_sends(p, bus)
        assert_greater(comp_kbps_normal, 0.6 * SPEED_NORMAL)
        assert_less(comp_kbps_normal, 1.0 * SPEED_NORMAL)
Example 15: test_dbpedia_spotlight
def test_dbpedia_spotlight():
    en_text = (u"Will the efforts of artists like Moby"
               u" help to preserve the Arctic?")
    nl_text = (u"Ik kan me iets herrinneren over de burgemeester van"
               u" Amstelveen en het achterwerk van M\xe1xima."
               u" Verder was Koningsdag een zwart gat.")

    en_annotations = dbpedia_spotlight(en_text, lang='en')
    nl_annotations = dbpedia_spotlight(nl_text, lang='nl')

    # Expect `Arctic` and `Moby` to be found in en_text
    assert_equal(len(en_annotations), 2)
    for ann in en_annotations:
        assert_in(ann['name'], {'Arctic', 'Moby'})
        # The disambiguation candidates should be of type list
        assert_true(isinstance(ann['resource'], list))
        # In this case, the top candidate's uri == the name
        assert_equal(ann['name'], ann['resource'][0]['uri'])

    # Expect {"burgemeester", "Amstelveen", u"M\xe1xima",
    # "Koningsdag", "zwart gat"} to be found in nl_text
    assert_equal(len(nl_annotations), 5)
    sf_set = set([ann['name'] for ann in nl_annotations])
    assert_equal(sf_set, {u"burgemeester", u"Amstelveen", u"M\xe1xima",
                          u"Koningsdag", u"zwart gat"})

    for ann in en_annotations:
        # The disambiguation candidates should be of type list
        assert_true(isinstance(ann['resource'], list))
        # There should be at least one candidate
        assert_greater(len(ann['resource']), 0)