本文整理汇总了Python中nose.tools.assert_true函数的典型用法代码示例。如果您正苦于以下问题:Python assert_true函数的具体用法?Python assert_true怎么用?Python assert_true使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了assert_true函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_make_dig_points
def test_make_dig_points():
    """Test application of Polhemus HSP points to an Info object.

    Builds ``info['dig']`` from an HSP file and from explicit fiducials,
    then checks the stored coordinates and the ValueError paths for
    malformed point arrays.
    """
    # HSP-only digitization: no fiducials supplied.
    dig_points = _read_dig_points(hsp_fname)
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    assert_false(info['dig'])

    info['dig'] = _make_dig_points(dig_points=dig_points)
    assert_true(info['dig'])
    assert_array_equal(info['dig'][0]['r'], [-106.93, 99.80, 68.81])

    # ELP file: first three points are the nasion/LPA/RPA fiducials.
    dig_points = _read_dig_points(elp_fname)
    nasion, lpa, rpa = dig_points[:3]
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    assert_false(info['dig'])

    info['dig'] = _make_dig_points(nasion, lpa, rpa, dig_points[3:], None)
    assert_true(info['dig'])
    idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION)
    assert_array_equal(info['dig'][idx]['r'],
                       np.array([1.3930, 13.1613, -4.6967]))

    # Each positional argument must be a 3D point (or an (n, 3) array for
    # dig_points); truncated inputs must raise ValueError.
    assert_raises(ValueError, _make_dig_points, nasion[:2])
    assert_raises(ValueError, _make_dig_points, None, lpa[:2])
    assert_raises(ValueError, _make_dig_points, None, None, rpa[:2])
    assert_raises(ValueError, _make_dig_points, None, None, None,
                  dig_points[:, :2])
    assert_raises(ValueError, _make_dig_points, None, None, None, None,
                  dig_points[:, :2])
示例2: test_valid_signature
def test_valid_signature(self):
    """Check RSA-SHA1 signature generation and verification round-trip.

    For every stored example, the signature generated from the RSA private
    key must equal the recorded one, and verification must succeed with
    both the X.509 certificate and the bare RSA public key.
    """
    for example in self._examples:
        client_shared_secret = example["private_key"]
        client_certificate = example["certificate"]
        public_key = example["public_key"]
        url = example["url"]
        method = example["method"]
        oauth_params = example["oauth_params"]
        expected_signature = example["oauth_signature"]
        # Using the RSA private key.
        assert_equal(expected_signature,
                     generate_rsa_sha1_signature(client_shared_secret,
                                                 method=method,
                                                 url=url,
                                                 oauth_params=oauth_params
                                                 )
                     )
        # Using the X.509 certificate.
        assert_true(verify_rsa_sha1_signature(
            client_certificate, expected_signature,
            method, url, oauth_params))
        # Using the RSA public key.
        assert_true(verify_rsa_sha1_signature(
            public_key, expected_signature,
            method, url, oauth_params))
示例3: test_em_gmm_cv
def test_em_gmm_cv():
    """Compare different GMMs using held-out (cross-validation) likelihood."""
    # generate some data: two Gaussian clusters in `dim` dimensions
    dim = 2
    xtrain = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim)))
    xtest = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(1000, dim)))

    # estimate different GMMs for xtrain, and score them on xtest
    prec_type = 'full'
    k, maxiter, delta = 2, 300, 1.e-4
    ll = []  # mean test log-likelihood, one entry per model

    # model 1: k=2, full precision
    lgmm = GMM(k, dim, prec_type)
    lgmm.initialize(xtrain)
    bic = lgmm.estimate(xtrain, maxiter, delta)
    ll.append(lgmm.test(xtest).mean())

    # model 2: k=2, diagonal precision
    prec_type = 'diag'
    lgmm = GMM(k, dim, prec_type)
    lgmm.initialize(xtrain)
    bic = lgmm.estimate(xtrain, maxiter, delta)
    ll.append(lgmm.test(xtest).mean())

    # models 3-5: diagonal precision with varying k
    # NOTE(review): unlike models 1-2 there is no estimate() call here, so
    # these models are scored right after initialization — confirm intended.
    for k in [1, 3, 10]:
        lgmm = GMM(k, dim, prec_type)
        lgmm.initialize(xtrain)
        ll.append(lgmm.test(xtest).mean())

    # the over-parameterized k=10 model (ll[4]) should generalize worse
    # than the fitted k=2 diagonal model (ll[1])
    assert_true(ll[4] < ll[1])
示例4: test_check_threshold
def test_check_threshold():
    """Check error handling and the returned score of check_threshold."""
    adjacency_matrix = np.array([[1., 2.],
                                 [2., 1.]])
    name = 'edge_threshold'
    calculate = 'fast_abs_percentile'

    # a few not correctly formatted strings for 'edge_threshold'
    wrong_edge_thresholds = ['0.1', '10', '10.2.3%', 'asdf%']
    for wrong_edge_threshold in wrong_edge_thresholds:
        assert_raises_regex(ValueError,
                            '{0}.+should be a number followed by '
                            'the percent sign'.format(name),
                            check_threshold,
                            wrong_edge_threshold, adjacency_matrix,
                            calculate, name)

    # a non-number, non-string threshold must raise TypeError
    threshold = object()
    assert_raises_regex(TypeError,
                        '{0}.+should be either a number or a string'.format(name),
                        check_threshold,
                        threshold, adjacency_matrix,
                        calculate, name)

    # To check if it also gives the score which is expected
    assert_true(1. < check_threshold("50%", adjacency_matrix,
                                     percentile_calculate=fast_abs_percentile,
                                     name='threshold') <= 2.)
示例5: _compare
def _compare(a, b):
    """Recursively compare two python objects, failing on any mismatch.

    Dict-like objects are compared key by key (keys in ``skip_types`` are
    ignored), lists element-wise, sparse CSR matrices by their data/indices,
    ndarrays approximately, and everything else by equality. ``last_keys``
    (a global) tracks the most recent dict keys visited so that a failure
    prints the path that led to it.
    """
    global last_keys
    # keys that are allowed to differ (environment/provenance metadata)
    skip_types = ['whitener', 'proj', 'reginv', 'noisenorm', 'nchan',
                  'command_line', 'working_dir', 'mri_file', 'mri_id']
    try:
        if isinstance(a, (dict, Info)):
            assert_true(isinstance(b, (dict, Info)))
            for k, v in six.iteritems(a):
                if k not in b and k not in skip_types:
                    raise ValueError('First one had one second one didn\'t:\n'
                                     '%s not in %s' % (k, b.keys()))
                if k not in skip_types:
                    # push k on the front of the key trail, keep length fixed
                    last_keys.pop()
                    last_keys = [k] + last_keys
                    _compare(v, b[k])
            for k, v in six.iteritems(b):
                if k not in a and k not in skip_types:
                    raise ValueError('Second one had one first one didn\'t:\n'
                                     '%s not in %s' % (k, a.keys()))
        elif isinstance(a, list):
            assert_true(len(a) == len(b))
            for i, j in zip(a, b):
                _compare(i, j)
        elif isinstance(a, sparse.csr.csr_matrix):
            assert_array_almost_equal(a.data, b.data)
            assert_equal(a.indices, b.indices)
            assert_equal(a.indptr, b.indptr)
        elif isinstance(a, np.ndarray):
            assert_array_almost_equal(a, b)
        else:
            assert_equal(a, b)
    except Exception:
        # show where in the structure the mismatch occurred before re-raising
        print(last_keys)
        raise
示例6: test_default_diverging_vlims
def test_default_diverging_vlims(self):
    """Normalized data should get symmetric vmin/vmax and be marked divergent."""
    p = mat._HeatMapper(self.df_norm, **self.default_kws)
    # default limits are symmetric about zero at the larger absolute extreme
    vlim = max(abs(self.x_norm.min()), abs(self.x_norm.max()))
    nt.assert_equal(p.vmin, -vlim)
    nt.assert_equal(p.vmax, vlim)
    nt.assert_true(p.divergent)
示例7: test_WilsonLT_Defaults_attrs1
def test_WilsonLT_Defaults_attrs1():
    '''Confirm default geo_all equivalence in derived classes with base.'''
    geos_all = [
        '0-0-2000',
        '0-0-1000',
        '1000-0-0',
        '600-0-800',
        '600-0-400S',
        '500-500-0',
        '400-[200]-0',
        '400-200-800',
        '400-[200]-800',
        '400-200-400S',
        '400-[100,100]-0',
        '500-[250,250]-0',
        '400-[100,100]-800',
        '400-[100,100]-400S',
        '400-[100,100,100]-800',
        '500-[50,50,50,50]-0',
        '400-[100,100,100,100]-800',
        '400-[100,100,100,100,100]-800'
    ]
    default_attr1 = bdft.geos_all                          # Base attribute
    default_attr2 = dft.geos_all                           # Sub-class attribute
    expected = geos_all
    #print(set(default_dict))
    #print(set(expected)

    # Superset check (not equality) allows extension in
    # BaseDefaults().geo_inputs without breaking this test.
    actual1 = (set(default_attr1) >= set(expected))
    actual2 = (set(default_attr2) >= set(expected))
    #print(actual1)
    # NOTE(review): assert_true takes (expr, msg); `expected` is used here as
    # the failure message, not compared — possibly meant assert_equal. TODO.
    nt.assert_true(actual1, expected)
    nt.assert_true(actual2, expected)
示例8: given_a_created_product_with_name_group1
def given_a_created_product_with_name_group1(step, product_id):
    """Create a product named ``product_id`` via the API and store its id.

    Asserts the HTTP response is OK, then saves the created product's name
    into ``world.product_id`` for later steps.
    """
    body = dict_to_xml(default_product(name=product_id))
    response = api_utils.add_new_product(headers=world.headers, body=body)
    assert_true(response.ok, response.content)
    # parenthesized so this is valid in both Python 2 and 3
    print(response.content)
    world.product_id = response.json()[PRODUCT_NAME]
示例9: given_a_created_product_with_attributes_and_name_group1
def given_a_created_product_with_attributes_and_name_group1(step, product_id):
    """Create a product named ``product_id`` with two default metadata entries.

    Asserts the HTTP response is OK, then saves the created product's name
    into ``world.product_id`` for later steps.
    """
    metadatas = create_default_metadata_or_attributes_list(2)
    body = dict_to_xml(default_product(name=product_id, metadata=metadatas))
    response = api_utils.add_new_product(headers=world.headers, body=body)
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]
示例10: step_impl
def step_impl(context):
    """Select the first letter of the title by dragging from caret to label.

    Records the expected selection, its parent element, and the caret's
    screen position on ``context`` for later verification steps.
    """
    driver = context.driver
    util = context.util
    element, parent, parent_text = get_element_parent_and_parent_text(
        driver, ".__start_label._title_label")

    # This is where our selection will end
    end = util.element_screen_center(element)
    end["left"] += 2  # Move it off-center for this test

    element.click()
    wedutil.wait_for_caret_to_be_in(util, parent)

    # From the label to before the first letter and then past the
    # first letter.
    ActionChains(driver)\
        .send_keys(*[Keys.ARROW_RIGHT] * 2)\
        .perform()

    # We need to get the location of the caret.
    start = wedutil.caret_selection_pos(driver)

    select_text(context, start, end)

    assert_true(util.is_something_selected(), "something must be selected")

    context.expected_selection = parent_text[0:1]
    context.selection_parent = parent
    context.caret_screen_position = wedutil.caret_screen_pos(driver)
示例11: test_registered_classes_can_be_set_as_attrs
def test_registered_classes_can_be_set_as_attrs(self):
    """Assigning a dict to a registered attr wraps it in the container class."""
    app_registry.register('dummy', DummyAppDataContainer)
    art = Article()
    art.app_data.dummy = {'answer': 42}
    # the plain dict is coerced into the registered container type...
    tools.assert_true(isinstance(art.app_data.dummy, DummyAppDataContainer))
    tools.assert_equals(DummyAppDataContainer(art, {'answer': 42}),
                        art.app_data.dummy)
    # ...while the underlying app_data mapping keeps the raw values
    tools.assert_equals({'dummy': {'answer': 42}}, art.app_data)
示例12: test_decimate
def test_decimate():
    """Test decimation of digitizer headshapes with too many points."""
    # load headshape and convert to meters
    hsp_mm = _get_ico_surface(5)['rr'] * 100
    hsp_m = hsp_mm / 1000.

    # save headshape to a file in mm in temporary directory
    tempdir = _TempDir()
    sphere_hsp_path = op.join(tempdir, 'test_sphere.txt')
    np.savetxt(sphere_hsp_path, hsp_mm)

    # read in raw data using spherical hsp, and extract new hsp
    with warnings.catch_warnings(record=True) as w:
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
    assert_true(any('more than' in str(ww.message) for ww in w))
    # collect headshape from raw (should now be in m)
    hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]

    # with 10242 points and _decimate_points set to resolution of 5 mm, hsp_dec
    # should be a bit over 5000 points. If not, something is wrong or
    # decimation resolution has been purposefully changed
    assert_true(len(hsp_dec) > 5000)

    # should have similar size, distance from center
    dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0)) ** 2, axis=1))
    dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0)) ** 2,
                              axis=1))
    hsp_rad = np.mean(dist)
    hsp_dec_rad = np.mean(dist_dec)
    assert_almost_equal(hsp_rad, hsp_dec_rad, places=3)
示例13: test_validate_name
def test_validate_name(self):
    """Creating a document directory with an invalid name must be rejected."""
    # Test an invalid name ('/' is not an allowed character)
    invalid_name = '/invalid'
    response = self.client.post('/desktop/api2/doc/mkdir',
                                {'parent_uuid': json.dumps(self.home_dir.uuid),
                                 'name': json.dumps(invalid_name)})
    data = json.loads(response.content)
    # API signals failure with status == -1 and an explanatory message
    assert_equal(-1, data['status'], data)
    assert_true('invalid character' in data['message'])
示例14: test_ica_rank_reduction
def test_ica_rank_reduction():
    """Test recovery of full data when no source is rejected."""
    # Most basic recovery
    raw = Raw(raw_fname).crop(0.5, stop, False)
    raw.load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    for n_pca_components in [6, 10]:
        with warnings.catch_warnings(record=True):  # non-convergence
            warnings.simplefilter('always')
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method='fastica', max_iter=1).fit(raw, picks=picks)

        rank_before = raw.estimate_rank(picks=picks)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw, copy=True)
        rank_after = raw_clean.estimate_rank(picks=picks)
        # interaction between ICA rejection and PCA components difficult
        # to predict. Rank_after often seems to be 1 higher than
        # n_pca_components
        assert_true(n_components < n_pca_components <= rank_after <=
                    rank_before)
示例15: test_unicode_decode_error
def test_unicode_decode_error():
    """Analyzers must raise UnicodeDecodeError for wrongly-encoded bytes."""
    # decode_error defaults to 'strict', so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')

    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)

    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)

    # Check the old interface: 'charset' is deprecated in favor of 'encoding'
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")

        ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                             charset='ascii').build_analyzer()
        assert_raises(UnicodeDecodeError, ca, text_bytes)

        assert_equal(len(w), 1)
        assert_true(issubclass(w[0].category, DeprecationWarning))
        assert_true("charset" in str(w[0].message).lower())