本文整理汇总了Python中nose.tools.assert_less_equal函数的典型用法代码示例。如果您正苦于以下问题:Python assert_less_equal函数的具体用法?Python assert_less_equal怎么用?Python assert_less_equal使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了assert_less_equal函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: check_notebooks_are_looked_up_in_cache
def check_notebooks_are_looked_up_in_cache():
    """Verify that repeated notebook listings are served from the cache."""
    client = make_mock_api()
    # Issue the same request twice; only the first may reach the note store.
    for _ in range(2):
        client.list_notebooks()
    assert_less_equal(client.note_store.listNotebooks.call_count, 1)
示例2: test_mem_parse_giant_table
def test_mem_parse_giant_table():
    """Parsing a large HTML table must stream rows, not buffer them in memory."""
    # Note: this test really wants to be run by itself in a process since it
    # measures the *max* rss of the whole program. If python allocates
    # a large object which goes away, the test will lie to us. Hence,
    # kick_maxrss().
    alive = kick_maxrss()
    # Note: this has been tested with 1M rows, and it works but it's slow.
    # 100k rows make the point.
    n_rows = 100000
    payload = make_table(n_rows, 4)
    baseline_mb = getmaxrss_mb()
    # Count rows lazily; materializing them would defeat the measurement.
    seen = sum(1 for _ in find_trs(BytesIO(payload)))
    used_mb = getmaxrss_mb() - baseline_mb
    assert_equal(n_rows, seen)
    # Check that we didn't use more than 1MB to parse the table.
    assert_less_equal(used_mb, 1)
示例3: test_top_words
def test_top_words():
    """Top-word extraction yields one entry per topic, sorted by weight."""
    words = _get_top_words(test_model_output, TOP_WORDS)
    nt.assert_equal(len(words), 2)  # one entry per topic
    nt.assert_equal(len(words[1]), TOP_WORDS)
    # Weights must be in descending order within a topic.
    nt.assert_less_equal(words[0][1][0], words[0][0][0])
    nt.assert_less_equal(words[0][-1][0], words[0][-2][0])
示例4: check_descriptor_between
def check_descriptor_between(self, catchment, descr, lower, upper):
    """Assert that ``catchment.descriptors.<descr>`` lies in [lower, upper].

    :param catchment: object with a `descriptors` attribute and an `id`
    :param descr: name of the descriptor attribute to check
    :param lower: inclusive lower bound
    :param upper: inclusive upper bound
    """
    # Hoist the attribute lookup: both assertions test the same value.
    value = getattr(catchment.descriptors, descr)
    # The original messages had misplaced backticks (`descriptors.`{}),
    # putting the attribute name outside the quoted span; fixed here.
    nt.assert_greater_equal(value, lower,
                            msg="Catchment {} does not have `descriptors.{}` >= {}"
                            .format(catchment.id, descr, lower))
    nt.assert_less_equal(value, upper,
                         msg="Catchment {} does not have `descriptors.{}` <= {}"
                         .format(catchment.id, descr, upper))
示例5: test_incentive_process
def test_incentive_process(lim=1e-14):
    """
    Compare stationary distribution computations to known analytic form for
    neutral landscape for the Moran process.
    """
    cases = [(2, 10), (2, 40), (3, 10), (3, 20), (4, 10)]
    for n, N in cases:
        mu = (n - 1.) / n * 1. / (N + 1)
        alpha = N * mu / (n - 1. - n * mu)
        # The neutral landscape is the default for compute_edges.
        edges = incentive_process.compute_edges(
            N, num_types=n, incentive_func=replicator, mu=mu)
        for logspace in (False, True):
            expected = incentive_process.neutral_stationary(
                N, alpha, n, logspace=logspace)
            for exact in (False, True):
                computed = stationary_distribution(
                    edges, lim=lim, logspace=logspace, exact=exact)
                for state in expected.keys():
                    assert_almost_equal(
                        expected[state], computed[state], places=4)
            # The stationary distribution must satisfy balance conditions.
            check_detailed_balance(edges, expected)
            check_global_balance(edges, expected)
            check_eigenvalue(edges, expected)
            # Entropy rate is bounded by 0 <= er <= (2n - 1)/n * log(n).
            er = entropy_rate(edges, expected)
            upper_bound = (2. * n - 1) / n * numpy.log(n)
            assert_less_equal(er, upper_bound)
            assert_greater_equal(er, 0)
示例6: elev_label_to_elev
def elev_label_to_elev(elev_label):
    """Map an integer elevation label to an elevation angle (via deg_to_rad).

    Labels -1 and 0 both map to 30 degrees; each positive step adds 5 degrees.
    """
    assert_greater_equal(elev_label, -1)
    # max() collapses the -1 and 0 cases, matching the original conditional.
    elev_degrees = 30 + max(elev_label, 0) * 5
    assert_greater_equal(elev_degrees, 30)
    assert_less_equal(elev_degrees, 90)
    return deg_to_rad(elev_degrees)
示例7: test_max_marginals
def test_max_marginals():
"""
Test that max-marginals are correct.
"""
for h in hypergraphs():
w = utils.random_viterbi_potentials(h)
print w.show(h)
path = ph.best_path(h, w)
best = w.dot(path)
print "BEST"
print "\n".join(["%20s : %s"%(edge.label, w[edge]) for edge in path.edges])
print best
nt.assert_not_equal(best, 0.0)
max_marginals = ph.compute_marginals(h, w)
for node in h.nodes:
other = max_marginals[node]
nt.assert_less_equal(other, best + 1e-4)
for edge in h.edges:
other = max_marginals[edge]
nt.assert_less_equal(other, best + 1e-4)
if edge in path:
nt.assert_almost_equal(other, best)
示例8: check_sum_of_calls
def check_sum_of_calls(object_, methods, maximum_calls, minimum_calls=1):
"""
Instruments the given methods on the given object to verify that the total sum of calls made to the
methods falls between minumum_calls and maximum_calls.
"""
mocks = {
method: Mock(wraps=getattr(object_, method))
for method in methods
}
with patch.multiple(object_, **mocks):
yield
call_count = sum(mock.call_count for mock in mocks.values())
calls = pprint.pformat({
method_name: mock.call_args_list
for method_name, mock in mocks.items()
})
# Assertion errors don't handle multi-line values, so pretty-print to std-out instead
if not minimum_calls <= call_count <= maximum_calls:
print "Expected between {} and {} calls, {} were made. Calls: {}".format(
minimum_calls,
maximum_calls,
call_count,
calls,
)
# verify the counter actually worked by ensuring we have counted greater than (or equal to) the minimum calls
assert_greater_equal(call_count, minimum_calls)
# now verify the number of actual calls is less than (or equal to) the expected maximum
assert_less_equal(call_count, maximum_calls)
示例9: check_mongo_calls
def check_mongo_calls(mongo_store, max_finds=0, max_sends=None):
    """
    Instruments the given store to count the number of calls to find (incl find_one) and the number
    of calls to send_message which is for insert, update, and remove (if you provide max_sends). At the
    end of the with statement, it compares the counts to the max_finds and max_sends using a simple
    assertLessEqual.

    :param mongo_store: the MongoModulestore or subclass to watch
    :param max_finds: the maximum number of find calls to allow
    :param max_sends: If None, don't instrument the send calls. If not None, count and compare to
        the given int value.
    """
    # Set up the find instrumentation before entering the try so the finally
    # block can never hit a NameError on `wrap_patch`.
    find_wrap = Mock(wraps=mongo_store.collection.find)
    wrap_patch = patch.object(mongo_store.collection, 'find', find_wrap)
    wrap_patch.start()
    sends_patch = None
    try:
        # Explicit `is not None` so that max_sends=0 still instruments the
        # sends and asserts that none occurred (the original truthiness test
        # silently skipped instrumentation for 0, contradicting the docstring).
        if max_sends is not None:
            sends_wrap = Mock(wraps=mongo_store.database.connection._send_message)
            sends_patch = patch.object(mongo_store.database.connection, '_send_message', sends_wrap)
            sends_patch.start()
        yield
    finally:
        wrap_patch.stop()
        if sends_patch is not None:
            sends_patch.stop()
            assert_less_equal(sends_wrap.call_count, max_sends)
        assert_less_equal(find_wrap.call_count, max_finds)
示例10: test_geojson_with_key
def test_geojson_with_key(self):
    """Test for reading GeoJSON files with a key"""
    for filepath in self.filepaths:
        parsed = io.read_geojson(filepath)
        converted = cv.records2geojson(parsed, key='id')
        payload = loads(converted.read())

        # Top-level structure of the generated FeatureCollection.
        nt.assert_equal('FeatureCollection', payload['type'])
        nt.assert_true('crs' in payload)
        nt.assert_equal(self.bbox, payload['bbox'])
        nt.assert_true(payload['features'])

        for feature in payload['features']:
            nt.assert_equal('Feature', feature['type'])
            nt.assert_true('id' in feature)
            nt.assert_less_equal(2, len(feature['properties']))

            # Each geometry type nests its coordinate pairs differently.
            coords = feature['geometry']['coordinates']
            geo_type = feature['geometry']['type']
            if geo_type == 'Point':
                nt.assert_equal(2, len(coords))
            elif geo_type == 'LineString':
                nt.assert_equal(2, len(coords[0]))
            elif geo_type == 'Polygon':
                nt.assert_equal(2, len(coords[0][0]))
示例11: test_def_rxtr_req_sample
def test_def_rxtr_req_sample():
    """Default RandomRequestPoint samples take their expected values, and the
    stochastic coefficients stay in range with the right long-run averages."""
    point = RandomRequestPoint()
    # Scalar parameters all sample to their default values.
    assert_equal(1, point.n_commods.sample())
    assert_equal(1, point.n_request.sample())
    assert_equal(1, point.assem_per_req.sample())
    assert_false(point.assem_multi_commod.sample())
    assert_equal(0, point.req_multi_commods.sample())
    assert_false(point.exclusive.sample())
    assert_equal(0, point.n_req_constr.sample())
    assert_equal(1, point.n_supply.sample())
    assert_equal(0, point.sup_multi.sample())
    assert_equal(0, point.sup_multi_commods.sample())
    assert_equal(1, point.n_sup_constr.sample())
    assert_equal(1, point.sup_constr_val.sample())
    assert_true(point.connection.sample())
    # Two default-constructed points compare equal.
    assert_equal(RandomRequestPoint(), point)

    n_samples = 5000
    constr_total = 0
    pref_total = 0
    for _ in range(n_samples):
        constr = point.constr_coeff.sample()
        constr_total += constr
        assert_greater(constr, 0)
        assert_less_equal(constr, 2)
        pref = point.pref_coeff.sample()
        pref_total += pref
        assert_greater(pref, 0)
        assert_less_equal(pref, 1)
    # Sample means converge to 1.0 and 0.5 respectively.
    assert_almost_equal(1.0, constr_total / n_samples, places=1)
    assert_almost_equal(0.5, pref_total / n_samples, places=1)
示例12: test_response_times_for_3_month_period
def test_response_times_for_3_month_period(self):
    # Skipped unconditionally: the backend cannot answer this query fast enough yet.
    raise SkipTest("API isn't fast enough for this yet...")
    # Everything below is unreachable until the skip above is removed.
    query = ('&start=2014-08-01T00:00:00Z'
             '&end=2014-11-01T00:00:00Z')
    url = self.BASE_URL + self.BASE_PATH + query
    _min, _max, median = _get_url_response_times(url)
    assert_less_equal(median, 1000)
示例13: init_sparse_bias
def init_sparse_bias(shared_variable, num_nonzeros, rng):
    """
    Mimics the sparse initialization in
    pylearn2.models.mlp.Linear.set_input_space()
    """
    values = shared_variable.get_value()
    assert_equal(values.shape[0], 1)
    assert_greater_equal(num_nonzeros, 0)
    assert_less_equal(num_nonzeros, values.shape[1])

    # Zero everything, then scatter standard-normal draws into
    # `num_nonzeros` randomly chosen positions.
    values[...] = 0.0
    chosen = rng.choice(values.size, size=num_nonzeros, replace=False)
    values[0, chosen] = rng.randn(num_nonzeros)
    # Deliberately NOT dividing by num_nonzeros here: for biases that was
    # found to increase the final misclassification rate by .001.
    shared_variable.set_value(values)
示例14: on_epoch
def on_epoch(self):
    '''
    Loops through an epoch of the validation dataset.
    '''
    # Notify all callbacks that the pass is starting.
    for callback in self._epoch_callbacks:
        callback.on_start_training()

    # Walk batches until the iterator wraps around to a new epoch.
    at_epoch_end = False
    while not at_epoch_end:
        input_batches = self._input_iterator.next()
        at_epoch_end = self._input_iterator.next_is_new_epoch()

        # pylint: disable=star-args
        computed_values = self._update_function(*input_batches)

        # Each IterationCallback consumes a contiguous slice of the computed
        # values, one entry per node it registered to compute.
        cursor = 0
        for callback in self._epoch_callbacks:
            if isinstance(callback, IterationCallback):
                slice_end = cursor + len(callback.nodes_to_compute)
                assert_less_equal(slice_end, len(computed_values))
                callback.on_iteration(computed_values[cursor:slice_end])
                cursor = slice_end

    # Calls epoch_callbacks' on_epoch() methods.
    for callback in self._epoch_callbacks:
        callback.on_epoch()
示例15: test_get_next_candidate
def test_get_next_candidate(self):
    """
    Tests the get next candidate function.

    Tests:
        - The candidate's parameters are acceptable
        - A paused candidate is handed back on the next request
    """
    def _poll_candidate():
        # Poll up to 20 times (~2 seconds) for a candidate. The original
        # reused one `counter` across both polls, so the second poll could
        # get far fewer than 20 attempts; each poll now starts fresh. It
        # also raised whenever counter reached 20, even if a candidate was
        # found on the final attempt; we now raise only on a real timeout.
        candidate = None
        attempts = 0
        while candidate is None and attempts < 20:
            candidate = self.EAss.get_next_candidate()
            time.sleep(0.1)
            attempts += 1
        if candidate is None:
            raise Exception("Received no result in the first 2 seconds.")
        return candidate

    cand = _poll_candidate()
    assert_is_none(cand.result)
    params = cand.params
    assert_less_equal(params["x"], 1)
    assert_greater_equal(params["x"], 0)
    assert_in(params["name"], self.param_defs["name"].values)

    self.EAss.update(cand, "pausing")
    time.sleep(1)
    # The paused candidate should be returned again by the assistant.
    new_cand = _poll_candidate()
    assert_equal(new_cand, cand)