This article collects typical usage examples of the assert_raises function from the Python module mvpa2.testing.tools. If you have been wondering what assert_raises does, how to use it, or what it looks like in practice, the curated code samples below may help.
Shown below are 15 code examples of the assert_raises function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
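All of the examples rely on the same call pattern: assert_raises (which mvpa2.testing.tools exposes following the familiar nose/unittest convention) takes the expected exception class (or a tuple of classes), the callable under test, and then any positional and keyword arguments to invoke it with. A minimal standalone sketch, using a hypothetical divide helper purely for illustration:

    from mvpa2.testing.tools import assert_raises

    def divide(a, b):
        # hypothetical helper, only here to demonstrate the call pattern
        return a / b

    # passes silently: divide(1, 0) raises the expected ZeroDivisionError
    assert_raises(ZeroDivisionError, divide, 1, 0)
    # this one would itself fail with AssertionError, since divide(4, b=2)
    # returns normally instead of raising:
    #assert_raises(ZeroDivisionError, divide, 4, b=2)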
Example 1: test_assert_objectarray_equal
def test_assert_objectarray_equal():
    if versions['numpy'] < '1.4':
        raise SkipTest("Skipping because of known segfaults with numpy < 1.4")
    # explicit dtype so we could test with numpy < 1.6
    a = np.array([np.array([0, 1]), np.array(1)], dtype=object)
    b = np.array([np.array([0, 1]), np.array(1)], dtype=object)

    # they should be ok for both types of comparison
    for strict in True, False:
        # good with self
        assert_objectarray_equal(a, a, strict=strict)
        # good with a copy
        assert_objectarray_equal(a, a.copy(), strict=strict)
        # good while operating with an identical one
        # see http://projects.scipy.org/numpy/ticket/2117
        assert_objectarray_equal(a, b, strict=strict)

    # now check if we still fail for a good reason
    for value_equal, b in (
            (False, np.array(1)),
            (False, np.array([1])),
            (False, np.array([np.array([0, 1]), np.array((1, 2))], dtype=object)),
            (False, np.array([np.array([0, 1]), np.array(1.1)], dtype=object)),
            (True, np.array([np.array([0, 1]), np.array(1.0)], dtype=object)),
            (True, np.array([np.array([0, 1]), np.array(1, dtype=object)], dtype=object)),
            ):
        assert_raises(AssertionError, assert_objectarray_equal, a, b)
        if value_equal:
            # but should not raise for non-default strict=False
            assert_objectarray_equal(a, b, strict=False)
        else:
            assert_raises(AssertionError, assert_objectarray_equal, a, b,
                          strict=False)
Example 2: test_mapper_vs_zscore
def test_mapper_vs_zscore():
    """Test by comparing to results of elderly z-score function
    """
    # data: 40 sample feature line in 20d space (40x20; samples x features)
    dss = [
        dataset_wizard(np.concatenate(
            [np.arange(40) for i in range(20)]).reshape(20, -1).T,
            targets=1, chunks=1),
        ] + datasets.values()

    for ds in dss:
        ds1 = deepcopy(ds)
        ds2 = deepcopy(ds)

        zsm = ZScoreMapper(chunks_attr=None)
        assert_raises(RuntimeError, zsm.forward, ds1.samples)
        idhashes = (idhash(ds1), idhash(ds1.samples))
        zsm.train(ds1)
        idhashes_train = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_train)

        # forward dataset
        ds1z_ds = zsm.forward(ds1)
        idhashes_forwardds = (idhash(ds1), idhash(ds1.samples))
        # must not modify samples in place!
        assert_equal(idhashes, idhashes_forwardds)

        # forward samples explicitly
        ds1z = zsm.forward(ds1.samples)
        idhashes_forward = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_forward)

        zscore(ds2, chunks_attr=None)
        assert_array_almost_equal(ds1z, ds2.samples)
        assert_array_equal(ds1.samples, ds.samples)
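For orientation, the z-scoring being compared here is ordinary per-feature standardization. A minimal plain-numpy sketch of the idea (not PyMVPA's actual implementation; it assumes mean and std are estimated over all samples, as with chunks_attr=None):

    import numpy as np

    samples = np.random.normal(10, 5, size=(40, 20))
    zscored = (samples - samples.mean(axis=0)) / samples.std(axis=0)
    # every feature now has (approximately) zero mean and unit variance
    assert np.allclose(zscored.mean(axis=0), 0)
    assert np.allclose(zscored.std(axis=0), 1)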
Example 3: test_sphere_scaled
def test_sphere_scaled():
    s1 = ne.Sphere(3)
    s = ne.Sphere(3, element_sizes=(1, 1))

    # Should give exactly the same results since element_sizes are 1s
    for p in ((0, 0), (-23, 1)):
        assert_array_equal(s1(p), s(p))
        ok_(len(s(p)) == len(set(s(p))))

    # Raise exception if query dimensionality does not match element_sizes
    assert_raises(ValueError, s, (1,))

    s = ne.Sphere(3, element_sizes=(1.5, 2))
    assert_array_equal(s((0, 0)),
                       [(-2, 0), (-1, -1), (-1, 0), (-1, 1),
                        (0, -1), (0, 0), (0, 1),
                        (1, -1), (1, 0), (1, 1), (2, 0)])

    s = ne.Sphere(1.5, element_sizes=(1.5, 1.5, 1.5))
    res = s((0, 0, 0))
    ok_(np.all([np.sqrt(np.sum(np.array(x) ** 2)) <= 1.5 for x in res]))
    ok_(len(res) == 7)

    # all neighbors are no more than 1 voxel away -- just a cube; for the
    # cube to pass as a "sphere" the radius has to be sqrt(3) * 1.5 ;)
    td = np.sqrt(3 * 1.5 ** 2)
    s = ne.Sphere(td, element_sizes=(1.5, 1.5, 1.5))
    res = s((0, 0, 0))
    ok_(np.all([np.sqrt(np.sum(np.array(x) ** 2)) <= td for x in res]))
    ok_(np.all([np.sum(np.abs(x) > 1) == 0 for x in res]))
    ok_(len(res) == 27)  # verified brute-force in the sketch below
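The final count of 27 can be checked by brute force, independent of PyMVPA: with element sizes of 1.5 and radius sqrt(3) * 1.5, every offset in the 3x3x3 cube around the center lies within the sphere (the corner offsets sit exactly on the boundary):

    import numpy as np
    from itertools import product

    radius, elsize = np.sqrt(3 * 1.5 ** 2), 1.5
    offsets = [o for o in product((-1, 0, 1), repeat=3)
               if np.linalg.norm(np.array(o) * elsize) <= radius]
    assert len(offsets) == 27  # the whole cube is inside the "sphere"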
Example 4: test_basic_collectable
def test_basic_collectable():
    c = Collectable()

    # empty by default
    assert_equal(c.name, None)
    assert_equal(c.value, None)
    assert_equal(c.__doc__, None)

    # late assignment
    c.name = 'somename'
    c.value = 12345
    assert_equal(c.name, 'somename')
    assert_equal(c.value, 12345)

    # immediate content
    c = Collectable('value', 'myname', "This is a test")
    assert_equal(c.name, 'myname')
    assert_equal(c.value, 'value')
    assert_equal(c.__doc__, "This is a test")
    assert_equal(str(c), 'myname')

    # repr
    e = eval(repr(c))
    assert_equal(e.name, 'myname')
    assert_equal(e.value, 'value')
    assert_equal(e.__doc__, "This is a test")

    # shallow copy does not create a view of value array
    c.value = np.arange(5)
    d = copy.copy(c)
    assert_false(d.value.base is c.value)

    # names starting with _ are not allowed
    assert_raises(ValueError, c._set_name, "_underscore")
Example 5: test_corrstability_smoketest
def test_corrstability_smoketest(ds):
    if 'chunks' not in ds.sa:
        return
    if len(ds.sa['targets'].unique) > 30:
        # was regression dataset
        return
    # very basic testing
    cs = CorrStability()
    #ds = datasets['uni2small']
    out = cs(ds)

    assert_equal(out.shape, (ds.nfeatures,))
    ok_(np.all(out >= -1.001))  # it should be a correlation after all
    ok_(np.all(out <= 1.001))

    # and theoretically those nonbogus features should have higher values
    if 'nonbogus_targets' in ds.fa:
        bogus_features = np.array([x is None for x in ds.fa.nonbogus_targets])
        assert_array_less(np.mean(out[bogus_features]),
                          np.mean(out[~bogus_features]))

    # and if we move targets to an alternative location
    ds = ds.copy(deep=True)
    ds.sa['alt'] = ds.T
    ds.sa.pop('targets')
    assert_raises(KeyError, cs, ds)
    cs = CorrStability('alt')
    out_ = cs(ds)
    assert_array_equal(out, out_)
Example 6: test_sifter_with_balancing
def test_sifter_with_balancing():
    # extended previous test which was already
    # "... somewhat duplicating the doctest"
    ds = Dataset(samples=np.arange(12).reshape((-1, 2)),
                 sa={'chunks':  [ 0 ,  1 ,  2 ,  3 ,  4,  5 ],
                     'targets': ['c', 'c', 'c', 'p', 'p', 'p']})

    # Without sifter -- just to assure that we do get all of them
    # i.e. 6*5*4*3/(4!) = 15
    par = ChainNode([NFoldPartitioner(cvtype=4, attr='chunks')])
    assert_equal(len(list(par.generate(ds))), 15)

    # so we will take 4 chunks out of the available 6, but care only
    # about those partitions where we have a balanced number of 'c' and 'p'
    # entries
    assert_raises(ValueError,
                  lambda x: list(Sifter([('targets', dict(wrong=1))]).generate(x)),
                  ds)

    par = ChainNode([NFoldPartitioner(cvtype=4, attr='chunks'),
                     Sifter([('partitions', 2),
                             ('targets',
                              dict(uvalues=['c', 'p'],
                                   balanced=True))])
                     ])
    dss = list(par.generate(ds))
    # print [x[x.sa.partitions == 2].sa.targets for x in dss]
    assert_equal(len(dss), 9)  # see the combinatorial check below

    for ds_ in dss:
        testing = ds[ds_.sa.partitions == 2]
        assert_array_equal(np.unique(testing.sa.targets), ['c', 'p'])
        # and we still have both targets present in training
        training = ds[ds_.sa.partitions == 1]
        assert_array_equal(np.unique(training.sa.targets), ['c', 'p'])
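The expected count of 9 follows from plain combinatorics: of the C(6, 4) = 15 ways to pick 4 testing chunks, only those with exactly two 'c' chunks and two 'p' chunks are balanced, and C(3, 2) * C(3, 2) = 9. A standalone check of that arithmetic, with no PyMVPA involved:

    from itertools import combinations

    chunk_targets = {0: 'c', 1: 'c', 2: 'c', 3: 'p', 4: 'p', 5: 'p'}
    picks = list(combinations(chunk_targets, 4))
    balanced = [p for p in picks
                if sum(chunk_targets[c] == 'c' for c in p) == 2]
    print(len(picks))     # 15 partitions in total
    print(len(balanced))  # 9 of them are balanced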
Example 7: test_permute_chunks
def test_permute_chunks():

    def is_sorted(x):
        return np.array_equal(np.sort(x), x)

    ds = give_data()
    # make target labels unique: labels are not permuted within chunks,
    # so with the original repeated targets assure=True would be an error
    ds.sa['targets'] = range(len(ds.sa.targets))

    permutation = AttributePermutator(attr='targets',
                                      chunk_attr='chunks',
                                      strategy='chunks',
                                      assure=True)
    pds = permutation(ds)

    assert_false(is_sorted(pds.sa.targets))
    assert_true(np.array_equal(pds.samples, ds.samples))
    for chunk_id in np.unique(pds.sa.chunks):
        chunk_ds = pds[pds.sa.chunks == chunk_id]
        assert_true(is_sorted(chunk_ds.sa.targets))

    permutation = AttributePermutator(attr='targets',
                                      strategy='chunks')
    assert_raises(ValueError, permutation, ds)
Example 8: test_product_flatten
def test_product_flatten():
    nsamples = 17
    product_name_values = [('chan', ['C1', 'C2']),
                           ('freq', np.arange(4, 20, 6)),
                           ('time', np.arange(-200, 800, 200))]
    shape = (nsamples,) + tuple(len(v) for _, v in product_name_values)

    sample_names = ['samp%d' % i for i in xrange(nsamples)]

    # generate random data in four dimensions
    data = np.random.normal(size=shape)
    ds = Dataset(data, sa=dict(sample_names=sample_names))

    # apply flattening to ds
    flattener = ProductFlattenMapper(product_name_values)

    # test I/O (only if h5py is available)
    if externals.exists('h5py'):
        from mvpa2.base.hdf5 import h5save, h5load
        import tempfile
        import os

        _, testfn = tempfile.mkstemp('mapper.h5py', 'test_product')
        h5save(testfn, flattener)
        flattener = h5load(testfn)
        os.unlink(testfn)

    mds = flattener(ds)

    prod = lambda x: reduce(operator.mul, x)

    # ensure the size is ok
    assert_equal(mds.shape, (nsamples,) + (prod(shape[1:]),))

    ndim = len(product_name_values)

    idxs = [range(len(v)) for _, v in product_name_values]
    for si in xrange(nsamples):
        for fi, p in enumerate(itertools.product(*idxs)):
            data_tup = (si,) + p

            x = mds[si, fi]

            # value should match
            assert_equal(data[data_tup], x.samples[0, 0])

            # indices should match as well
            all_idxs = tuple(x.fa['chan_freq_time_indices'].value.ravel())
            assert_equal(p, all_idxs)

            # values and indices in each dimension should match
            for i, (name, value) in enumerate(product_name_values):
                assert_equal(x.fa[name].value, value[p[i]])
                assert_equal(x.fa[name + '_indices'].value, p[i])

    product_name_values += [('foo', [1, 2, 3])]
    flattener = ProductFlattenMapper(product_name_values)
    assert_raises(ValueError, flattener, ds)
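A quick sanity check on the sizes involved: 'chan' contributes 2 values, 'freq' = np.arange(4, 20, 6) contributes 3, and 'time' = np.arange(-200, 800, 200) contributes 5, so the flattened dataset has 2 * 3 * 5 = 30 features per sample:

    import numpy as np

    lens = [2, len(np.arange(4, 20, 6)), len(np.arange(-200, 800, 200))]
    print(lens, np.prod(lens))  # [2, 3, 5] 30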
Example 9: test_vector_alignment_find_rotation_illegal_inputs
def test_vector_alignment_find_rotation_illegal_inputs(self):
    arr = np.asarray
    illegal_args = [
        [arr([1, 2]), arr([1, 3])],
        [arr([1, 2, 3]), arr([1, 3])],
        [arr([1, 2, 3]), np.random.normal(size=(3, 3))]
    ]
    for illegal_arg in illegal_args:
        assert_raises((ValueError, IndexError),
                      vector_alignment_find_rotation, *illegal_arg)
Example 10: test_attrmap_conflicts
def test_attrmap_conflicts():
    am_n = AttributeMap({'a': 1, 'b': 2, 'c': 1})
    am_t = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='tuple')
    am_l = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='lucky')
    q_f = ['a', 'b', 'a', 'c']

    # should have no effect on forward mapping
    ok_(np.all(am_n.to_numeric(q_f) == am_t.to_numeric(q_f)))
    ok_(np.all(am_t.to_numeric(q_f) == am_l.to_numeric(q_f)))

    # reverse mapping fails when collisions were not resolved
    assert_raises(ValueError, am_n.to_literal, [2])
    r_t = am_t.to_literal([2, 1])
    r_l = am_l.to_literal([2, 1])
Example 11: test_mean_tpr
def test_mean_tpr():
    # Let's test now on some imbalanced sets
    assert_raises(ValueError, mean_tpr, [1], [])
    assert_raises(ValueError, mean_tpr, [], [1])
    assert_raises(ValueError, mean_tpr, [], [])
    # an interesting case: a label shows up among the predictions without
    # ever being present among the targets
    assert_raises(ValueError, mean_tpr, [1], [0])
    assert_raises(ValueError, mean_tpr, [0, 1], [0, 0])
    # but it should be ok to have some targets not present in prediction
    assert_equal(mean_tpr([0, 0], [0, 1]), .5)
    # the same regardless how many samples are in the 0-class
    # (the winner by # of samples does not take all)
    assert_equal(mean_tpr([0, 0, 0], [0, 0, 1]), .5)
    # whereas mean-accuracy would be different
    assert_almost_equal(mean_match_accuracy([0, 0, 0], [0, 0, 1]), 2/3.)
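The semantics being exercised: mean_tpr averages the true positive rate computed separately per target class, so it is insensitive to class imbalance, whereas mean_match_accuracy is the plain fraction of correct predictions. A naive re-implementation sketch of that idea (not PyMVPA's actual code; it assumes the (predicted, targets) argument order these tests use):

    import numpy as np

    def naive_mean_tpr(predicted, targets):
        # fraction of correct predictions within each target class, averaged
        predicted, targets = np.asarray(predicted), np.asarray(targets)
        return np.mean([np.mean(predicted[targets == t] == t)
                        for t in np.unique(targets)])

    print(naive_mean_tpr([0, 0], [0, 1]))        # 0.5: TPR is 1.0 for class 0, 0.0 for class 1
    print(naive_mean_tpr([0, 0, 0], [0, 0, 1]))  # still 0.5, regardless of class sizes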
Example 12: test_splitter
def test_splitter():
    ds = give_data()
    # split with defaults
    spl1 = Splitter('chunks')
    assert_raises(NotImplementedError, spl1, ds)

    splits = list(spl1.generate(ds))
    assert_equal(len(splits), len(ds.sa['chunks'].unique))

    for split in splits:
        # it should have performed basic slicing!
        assert_true(split.samples.base is ds.samples)
        assert_equal(len(split.sa['chunks'].unique), 1)
        assert_true('lastsplit' in split.a)
    assert_true(splits[-1].a.lastsplit)

    # now again, more customized
    spl2 = Splitter('targets', attr_values=[0, 1, 1, 2, 3, 3, 3], count=4,
                    noslicing=True)
    splits = list(spl2.generate(ds))
    assert_equal(len(splits), 4)
    for split in splits:
        # it should NOT have performed basic slicing!
        assert_false(split.samples.base is ds.samples)
        assert_equal(len(split.sa['targets'].unique), 1)
        assert_equal(len(split.sa['chunks'].unique), 10)
    assert_true(splits[-1].a.lastsplit)

    # two should be identical
    assert_array_equal(splits[1].samples, splits[2].samples)

    # now go wild and split by feature attribute
    ds.fa['roi'] = np.repeat([0, 1], 5)
    # splitter should auto-detect that this is a feature attribute
    spl3 = Splitter('roi')
    splits = list(spl3.generate(ds))
    assert_equal(len(splits), 2)
    for split in splits:
        assert_true(split.samples.base is ds.samples)
        assert_equal(len(split.fa['roi'].unique), 1)
        assert_equal(split.shape, (100, 5))

    # and finally test chained splitters
    cspl = ChainNode([spl2, spl3, spl1])
    splits = list(cspl.generate(ds))
    # 4 target splits x 2 roi splits x 10 chunk splits = 80
    assert_equal(len(splits), 80)
Example 13: test_collections
def test_collections():
    sa = SampleAttributesCollection()
    assert_equal(len(sa), 0)

    assert_raises(ValueError, sa.__setitem__, 'test', 0)
    l = range(5)
    sa['test'] = l
    # auto-wrapped
    assert_true(isinstance(sa['test'], ArrayCollectable))
    assert_equal(len(sa), 1)

    # names which are already present in dict interface
    assert_raises(ValueError, sa.__setitem__, 'values', range(5))

    sa_c = copy.deepcopy(sa)
    assert_equal(len(sa), len(sa_c))
    assert_array_equal(sa.test, sa_c.test)
Example 14: test_cached_query_engine
def test_cached_query_engine():
    """Test cached query engine
    """
    sphere = ne.Sphere(1)
    # dataset with just one "space"
    ds = datasets['3dlarge']
    qe0 = ne.IndexQueryEngine(myspace=sphere)
    qec = ne.CachedQueryEngine(qe0)
    # and ground truth one
    qe = ne.IndexQueryEngine(myspace=sphere)

    results_ind = []
    results_kw = []

    def cmp_res(res1, res2):
        comp = [x == y for x, y in zip(res1, res2)]
        ok_(np.all(comp))

    for iq, q in enumerate((qe, qec)):
        q.train(ds)
        # sequential train on the same should be ok in both cases
        q.train(ds)
        res_ind = [q[fid] for fid in xrange(ds.nfeatures)]
        res_kw = [q(myspace=x) for x in ds.fa.myspace]
        # test if results match
        cmp_res(res_ind, res_kw)
        results_ind.append(res_ind)
        results_kw.append(res_kw)

    # now check if results of cached were the same as of regular run
    cmp_res(results_ind[0], results_ind[1])

    # Now do sanity checks
    assert_raises(ValueError, qec.train, ds[:, :-1])
    assert_raises(ValueError, qec.train, ds.copy())
    ds2 = ds.copy()
    qec.untrain()
    qec.train(ds2)
    # should be the same results on the copy
    cmp_res(results_ind[0], [qec[fid] for fid in xrange(ds.nfeatures)])
    cmp_res(results_kw[0], [qec(myspace=x) for x in ds.fa.myspace])
    ok_(qec.train(ds2) is None)
Example 15: test_query_engine
def test_query_engine():
    data = np.arange(54)
    # indices in 3D
    ind = np.transpose((np.ones((3, 3, 3)).nonzero()))
    # sphere generator for 3 elements diameter
    sphere = ne.Sphere(1)
    # dataset with just one "space"
    ds = Dataset([data, data], fa={'s_ind': np.concatenate((ind, ind))})
    # and the query engine attaching the generator to the "index-space"
    qe = ne.IndexQueryEngine(s_ind=sphere)
    # cannot train since the engine does not know about the second space
    assert_raises(ValueError, qe.train, ds)
    # now do it again with a full spec
    ds = Dataset([data, data], fa={'s_ind': np.concatenate((ind, ind)),
                                   't_ind': np.repeat([0, 1], 27)})
    qe = ne.IndexQueryEngine(s_ind=sphere, t_ind=None)
    qe.train(ds)
    # internal representation check
    # YOH: invalid for new implementation with lookup tables (dictionaries)
    #assert_array_equal(qe._searcharray,
    #                   np.arange(54).reshape(qe._searcharray.shape) + 1)
    # should give us one corner, collapsing the 't_ind'
    # (these IDs are derived by hand in the sketch after this example)
    assert_array_equal(qe(s_ind=(0, 0, 0)),
                       [0, 1, 3, 9, 27, 28, 30, 36])
    # directly specifying an index for 't_ind' without having an ROI
    # generator, should give the same corner, but just once
    assert_array_equal(qe(s_ind=(0, 0, 0), t_ind=0), [0, 1, 3, 9])
    # just out of the mask -- no match
    assert_array_equal(qe(s_ind=(3, 3, 3)), [])
    # also out of the mask -- but single match
    assert_array_equal(qe(s_ind=(2, 2, 3), t_ind=1), [53])
    # query by id
    assert_array_equal(qe(s_ind=(0, 0, 0), t_ind=0), qe[0])
    assert_array_equal(qe(s_ind=(0, 0, 0), t_ind=[0, 1]),
                       qe(s_ind=(0, 0, 0)))
    # should not fail if t_ind is outside
    assert_array_equal(qe(s_ind=(0, 0, 0), t_ind=[0, 1, 10]),
                       qe(s_ind=(0, 0, 0)))
    # should fail if asked about some unknown thing
    assert_raises(ValueError, qe.__call__, s_ind=(0, 0, 0), buga=0)

    # Test by using some literal feature attribute
    ds.fa['lit'] = ['roi1', 'ro2', 'r3'] * 18
    # should work as well as before
    assert_array_equal(qe(s_ind=(0, 0, 0)), [0, 1, 3, 9, 27, 28, 30, 36])
    # should fail if asked about some unknown (yet) thing
    assert_raises(ValueError, qe.__call__, s_ind=(0, 0, 0), lit='roi1')
    # Create qe which can query literals as well
    qe_lit = ne.IndexQueryEngine(s_ind=sphere, t_ind=None, lit=None)
    qe_lit.train(ds)
    # should work as well as before
    assert_array_equal(qe_lit(s_ind=(0, 0, 0)), [0, 1, 3, 9, 27, 28, 30, 36])
    # and subselect nicely -- only the 'roi1' (every 3rd) ones
    assert_array_equal(qe_lit(s_ind=(0, 0, 0), lit='roi1'),
                       [0, 3, 9, 27, 30, 36])
    assert_array_equal(qe_lit(s_ind=(0, 0, 0), lit=['roi1', 'ro2']),
                       [0, 1, 3, 9, 27, 28, 30, 36])
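The corner IDs asserted above can be derived by hand: the features enumerate the 3x3x3 grid in C order (flat index i*9 + j*3 + k), so a radius-1 sphere around (0, 0, 0) keeps the center plus its three in-mask axis neighbors, and the second t_ind copy repeats them offset by 27. A standalone check of that reasoning:

    import numpy as np

    ind = np.transpose(np.ones((3, 3, 3)).nonzero())
    within = [f for f, p in enumerate(ind) if np.linalg.norm(p) <= 1]
    print(within)                             # [0, 1, 3, 9]
    print(within + [f + 27 for f in within])  # [0, 1, 3, 9, 27, 28, 30, 36]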