This article collects typical usage examples of the assert_true function from the Python module mvpa2.testing.tools. If you are wondering what assert_true does, how to call it, or what real code using it looks like, the hand-picked examples here should help.
Fifteen code examples of assert_true are shown below, all drawn from the PyMVPA (mvpa2) test suite. Note that the snippets rely on test-suite fixtures such as give_data() and the shared datasets dictionary, so they are meant to be read in that context rather than run standalone.
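None of the excerpts repeat their imports, so here is a minimal, self-contained sketch of how assert_true itself is brought in and used. It assumes assert_true mirrors nose's assertion of the same name (which these tools historically re-export), i.e. an (expr, msg=None) signature that raises AssertionError on a falsy argument:

from mvpa2.testing.tools import assert_true, assert_false

# passes silently: the argument is truthy
assert_true(1 + 1 == 2)
assert_true('a' in 'abc')
# would raise AssertionError, optionally with a custom message:
# assert_true(isinstance([], tuple), msg='expected a tuple')
assert_false([])  # the complementary check for falsy values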
Example 1: test_permute_chunks
def test_permute_chunks():

    def is_sorted(x):
        return np.array_equal(np.sort(x), x)

    ds = give_data()
    # make target labels unique: strategy='chunks' permutes whole chunks
    # rather than labels within a chunk, and assure=True guarantees that
    # the resulting order actually differs from the input
    ds.sa['targets'] = range(len(ds.sa.targets))
    permutation = AttributePermutator(attr='targets',
                                      chunk_attr='chunks',
                                      strategy='chunks',
                                      assure=True)
    pds = permutation(ds)

    assert_false(is_sorted(pds.sa.targets))
    assert_true(np.array_equal(pds.samples, ds.samples))
    for chunk_id in np.unique(pds.sa.chunks):
        chunk_ds = pds[pds.sa.chunks == chunk_id]
        assert_true(is_sorted(chunk_ds.sa.targets))

    # strategy='chunks' without a chunk_attr is invalid
    permutation = AttributePermutator(attr='targets',
                                      strategy='chunks')
    assert_raises(ValueError, permutation, ds)
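Several examples here (1, 2, 5, 6, and 9) call give_data(), a fixture defined in mvpa2's own test modules rather than in the library API. Below is a sketch of what it provides, reconstructed from the assertions in these tests (100 samples x 10 features, 10 chunks of 10 samples, 4 cycling target values); treat the exact implementation as an assumption:

import numpy as np
from mvpa2.datasets import dataset_wizard

def give_data():
    # 100 samples x 10 features; chunks 0..9 in blocks of 10 samples;
    # targets cycle through 0..3
    return dataset_wizard(np.random.normal(size=(100, 10)),
                          targets=[i % 4 for i in range(100)],
                          chunks=[i // 10 for i in range(100)])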
Example 2: test_log_exclusions
def test_log_exclusions():
    ds = give_data()
    ds.sa['time_coords'] = np.arange(len(ds))

    # only mark the selection in an attribute
    bal = Balancer()
    balanced = bal(ds)

    tmpfile = tempfile.mktemp()
    logex = LogExclusions(tmpfile, append=False)
    logged = logex(balanced)
    subds = balanced[~balanced.sa['balanced_set'].value]
    assert_true(logged is balanced)
    with open(tmpfile, 'r') as fobj:
        assert_true(fobj.readline().startswith('# New entry'))
    excluded = np.genfromtxt(tmpfile, dtype='u1', delimiter=',')
    assert_array_equal(excluded[:, 0], subds.sa.chunks)
    assert_array_equal(excluded[:, 1], subds.sa.targets)
    assert_array_equal(excluded[:, 2], subds.sa.time_coords)
    os.unlink(tmpfile)
Example 3: test_cosmo_repr_and_str
def test_cosmo_repr_and_str():
    # simple smoke test for __repr__ and __str__
    creators = (_create_small_mat_nbrhood_dict, _create_small_mat_dataset_dict)
    for creator in creators:
        obj = cosmo.from_any(creator())
        for fmt in 'rs':
            obj_str = (("%%%s" % fmt) % obj)
            assert_true(obj.__class__.__name__ in obj_str)
Example 4: test_eep_bin
def test_eep_bin():
    eb = EEPBin(os.path.join(pymvpa_dataroot, 'eep.bin'))

    assert_equal(eb.nchannels, 32)
    assert_equal(eb.nsamples, 2)
    assert_equal(eb.ntimepoints, 4)
    assert_true(eb.t0 - eb.dt < 0.00000001)
    assert_equal(len(eb.channels), 32)
    assert_equal(eb.data.shape, (2, 32, 4))
Example 5: test_balancer
def test_balancer():
    ds = give_data()

    # only mark the selection in an attribute
    bal = Balancer()
    res = bal(ds)
    # we get a new dataset, with shared samples
    assert_false(ds is res)
    assert_true(ds.samples is res.samples.base)
    # should kick out 2 samples in each chunk of 10
    assert_almost_equal(np.mean(res.sa.balanced_set), 0.8)

    # same as above, but actually apply the selection
    bal = Balancer(apply_selection=True, count=5)
    # just run it once
    res = bal(ds)
    # we get a new dataset, with shared samples
    assert_false(ds is res)
    # should kick out 2 samples in each chunk of 10
    assert_equal(len(res), int(0.8 * len(ds)))

    # now use it as a generator
    dses = list(bal.generate(ds))
    assert_equal(len(dses), 5)

    # with limit
    bal = Balancer(limit={'chunks': 3}, apply_selection=True)
    res = bal(ds)
    assert_equal(res.sa['chunks'].unique, (3,))
    assert_equal(get_nelements_per_value(res.sa.targets).values(),
                 [2] * 4)

    # same but include all offlimit samples
    bal = Balancer(limit={'chunks': 3}, include_offlimit=True,
                   apply_selection=True)
    res = bal(ds)
    assert_array_equal(res.sa['chunks'].unique, range(10))
    # chunk three still balanced, but the rest is not, i.e. all samples included
    assert_equal(get_nelements_per_value(res[res.sa.chunks == 3].sa.targets).values(),
                 [2] * 4)
    assert_equal(get_nelements_per_value(res.sa.chunks).values(),
                 [10, 10, 10, 8, 10, 10, 10, 10, 10, 10])

    # fixed amount
    bal = Balancer(amount=1, limit={'chunks': 3}, apply_selection=True)
    res = bal(ds)
    assert_equal(get_nelements_per_value(res.sa.targets).values(),
                 [1] * 4)

    # fraction
    bal = Balancer(amount=0.499, limit=None, apply_selection=True)
    res = bal(ds)
    assert_array_equal(
        np.round(np.array(get_nelements_per_value(ds.sa.targets).values()) * 0.5),
        np.array(get_nelements_per_value(res.sa.targets).values()))

    # check on feature attribute
    ds.fa['one'] = np.tile([1, 2], 5)
    ds.fa['chk'] = np.repeat([1, 2], 5)
    bal = Balancer(attr='one', amount=2, limit='chk', apply_selection=True)
    res = bal(ds)
    assert_equal(get_nelements_per_value(res.fa.one).values(),
                 [4] * 2)
Example 6: test_attrpermute
def test_attrpermute():
    ds = give_data()
    ds.sa['ids'] = range(len(ds))
    pristine_data = ds.samples.copy()
    permutation = AttributePermutator(['targets', 'ids'], assure=True)
    pds = permutation(ds)
    # should not touch the data
    assert_array_equal(pristine_data, pds.samples)
    # even keep the very same array
    assert_true(pds.samples.base is ds.samples)
    # there is no way that it can be the same attribute
    assert_false(np.all(pds.sa.ids == ds.sa.ids))
    # ids should reflect permutation setup
    assert_array_equal(pds.sa.targets, ds.sa.targets[pds.sa.ids])
    # other attribute should remain intact
    assert_array_equal(pds.sa.chunks, ds.sa.chunks)

    # now chunk-wise permutation
    permutation = AttributePermutator('ids', limit='chunks')
    pds = permutation(ds)
    # first ten should remain first ten
    assert_false(np.any(pds.sa.ids[:10] > 9))

    # same thing, but only permute single chunk
    permutation = AttributePermutator('ids', limit={'chunks': 3})
    pds = permutation(ds)
    # one chunk should change
    assert_false(np.any(pds.sa.ids[30:40] > 39))
    assert_false(np.any(pds.sa.ids[30:40] < 30))
    # the rest not
    assert_array_equal(pds.sa.ids[:30], range(30))

    # or a list of chunks
    permutation = AttributePermutator('ids', limit={'chunks': [3, 4]})
    pds = permutation(ds)
    # two chunks should change
    assert_false(np.any(pds.sa.ids[30:50] > 49))
    assert_false(np.any(pds.sa.ids[30:50] < 30))
    # the rest not
    assert_array_equal(pds.sa.ids[:30], range(30))

    # and now try generating more permutations
    nruns = 2
    permutation = AttributePermutator(['targets', 'ids'], assure=True, count=nruns)
    pds = list(permutation.generate(ds))
    assert_equal(len(pds), nruns)
    for p in pds:
        assert_false(np.all(p.sa.ids == ds.sa.ids))

    # permute feature attrs
    ds.fa['ids'] = range(ds.shape[1])
    permutation = AttributePermutator('fa.ids', assure=True)
    pds = permutation(ds)
    assert_false(np.all(pds.fa.ids == ds.fa.ids))
Example 7: test_cosmo_do_not_store_unsupported_datatype
def test_cosmo_do_not_store_unsupported_datatype():
    ds = Dataset(np.zeros((0, 0)))

    class ArbitraryClass(object):
        pass

    ds.a['unused'] = ArbitraryClass()
    c = cosmo.map2cosmo(ds)
    assert_false('a' in c.keys())

    ds.a['foo'] = np.zeros((1,))
    c = cosmo.map2cosmo(ds)
    assert_true('a' in c.keys())
Example 8: test_identity_mapper
def test_identity_mapper(s):
    idm = IdentityMapper()
    # doesn't matter what you throw at it
    assert_true(idm.forward(s) is s)
    assert_true(idm.forward1(s) is s)
    assert_true(idm.reverse(s) is s)
    assert_true(idm.reverse1(s) is s)
Example 9: test_splitter
def test_splitter():
    ds = give_data()
    # split with defaults
    spl1 = Splitter('chunks')
    assert_raises(NotImplementedError, spl1, ds)

    splits = list(spl1.generate(ds))
    assert_equal(len(splits), len(ds.sa['chunks'].unique))
    for split in splits:
        # it should have performed basic slicing!
        assert_true(split.samples.base is ds.samples)
        assert_equal(len(split.sa['chunks'].unique), 1)
        assert_true('lastsplit' in split.a)
    assert_true(splits[-1].a.lastsplit)

    # now again, more customized
    spl2 = Splitter('targets', attr_values=[0, 1, 1, 2, 3, 3, 3], count=4,
                    noslicing=True)
    splits = list(spl2.generate(ds))
    assert_equal(len(splits), 4)
    for split in splits:
        # it should NOT have performed basic slicing!
        assert_false(split.samples.base is ds.samples)
        assert_equal(len(split.sa['targets'].unique), 1)
        assert_equal(len(split.sa['chunks'].unique), 10)
    assert_true(splits[-1].a.lastsplit)

    # two should be identical
    assert_array_equal(splits[1].samples, splits[2].samples)

    # now go wild and split by a feature attribute
    ds.fa['roi'] = np.repeat([0, 1], 5)
    # the splitter should auto-detect that this is a feature attribute
    spl3 = Splitter('roi')
    splits = list(spl3.generate(ds))
    assert_equal(len(splits), 2)
    for split in splits:
        assert_true(split.samples.base is ds.samples)
        assert_equal(len(split.fa['roi'].unique), 1)
        assert_equal(split.shape, (100, 5))

    # and finally test chained splitters
    cspl = ChainNode([spl2, spl3, spl1])
    splits = list(cspl.generate(ds))
    # 4 target splits x 2 roi splits x 10 chunk splits
    assert_equal(len(splits), 80)
Example 10: test_collections
def test_collections():
    sa = SampleAttributesCollection()
    assert_equal(len(sa), 0)

    assert_raises(ValueError, sa.__setitem__, 'test', 0)
    l = range(5)
    sa['test'] = l
    # auto-wrapped
    assert_true(isinstance(sa['test'], ArrayCollectable))
    assert_equal(len(sa), 1)

    # names which are already present in dict interface
    assert_raises(ValueError, sa.__setitem__, 'values', range(5))

    sa_c = copy.deepcopy(sa)
    assert_equal(len(sa), len(sa_c))
    assert_array_equal(sa.test, sa_c.test)
Example 11: test_glmnet_r
def test_glmnet_r():
    # not the perfect dataset with which to test, but
    # it will do for now.
    #data = datasets['dumb2']
    # for some reason the R code fails with the dumb data
    data = datasets['chirp_linear']

    clf = GLMNET_R()
    clf.train(data)

    # prediction has to be almost perfect
    # test with a correlation
    pre = clf.predict(data.samples)
    corerr = corr_error(pre, data.targets)
    if cfg.getboolean('tests', 'labile', default='yes'):
        assert_true(corerr < .2)
Example 12: test_array_collectable
def test_array_collectable():
    c = ArrayCollectable()

    # empty by default
    assert_equal(c.name, None)
    assert_equal(c.value, None)

    # late assignment
    c.name = 'somename'
    assert_raises(ValueError, c._set, 12345)
    assert_equal(c.value, None)
    c.value = np.arange(5)
    assert_equal(c.name, 'somename')
    assert_array_equal(c.value, np.arange(5))

    # immediate content
    data = np.random.random(size=(3, 10))
    c = ArrayCollectable(data.copy(), 'myname',
                         "This is a test", length=3)
    assert_equal(c.name, 'myname')
    assert_array_equal(c.value, data)
    assert_equal(c.__doc__, "This is a test")
    assert_equal(str(c), 'myname')

    # repr
    from numpy import array
    e = eval(repr(c))
    assert_equal(e.name, 'myname')
    assert_array_almost_equal(e.value, data)
    assert_equal(e.__doc__, "This is a test")

    # cannot assign array of wrong length
    assert_raises(ValueError, c._set, np.arange(5))
    assert_equal(len(c), 3)

    # shallow copy DOES create a view of value array
    c.value = np.arange(3)
    d = copy.copy(c)
    assert_true(d.value.base is c.value)

    # names starting with _ are not allowed
    assert_raises(ValueError, c._set_name, "_underscore")
Example 13: test_transpose
def test_transpose():
    from mvpa2.mappers.shape import TransposeMapper
    ds = Dataset(np.arange(24).reshape(2, 3, 4),
                 sa={"testsa": np.arange(2)},
                 fa={"testfa": np.arange(3)})
    tp = TransposeMapper()
    tds = tp(ds)
    assert_equal(tds.shape, (3, 2, 4))
    assert_true("testfa" in tds.sa)
    assert_true("testsa" in tds.fa)
    assert_false(tds.fa is tds.sa)

    # and back
    ttds = tp(tds)
    assert_array_equal(ttds.samples, ds.samples)
    assert_equal(ttds.sa, ds.sa)
    assert_equal(ttds.fa, ds.fa)

    # or this way
    rds = tp.reverse(tds)
    assert_array_equal(rds.samples, ds.samples)
    assert_equal(rds.sa, ds.sa)
    assert_equal(rds.fa, ds.fa)
    assert_array_equal(rds.samples, ttds.samples)
    assert_equal(rds.sa, ttds.sa)
    assert_equal(rds.fa, ttds.fa)
Example 14: test_slicing
def test_slicing(self):
    hs = HalfPartitioner()
    spl = Splitter(attr='partitions')
    splits = list(hs.generate(self.data))
    for s in splits:
        # partitioned dataset shares the data
        assert_true(s.samples.base is self.data.samples)
    splits = [list(spl.generate(p)) for p in hs.generate(self.data)]

    # with numpy 1.7.0b1 "chaining" of views was deprecated, so create
    # a check function appropriate for the numpy version at hand
    _a = np.arange(5)
    __a = _a[:4][:3]
    if __a.base is _a:
        # numpy >= 1.7.0b1
        def is_the_same_base(x, base=self.data.samples):
            return x.base is base
    elif __a.base.base is _a:
        # numpy < 1.7.0b1
        def is_the_same_base(x, base=self.data.samples):
            return x.base.base is base
    else:
        raise RuntimeError("Unknown handling of .base by numpy")

    for s in splits:
        # we get slicing all the time
        assert_true(is_the_same_base(s[0].samples))
        assert_true(is_the_same_base(s[1].samples))
    spl = Splitter(attr='partitions', noslicing=True)
    splits = [list(spl.generate(p)) for p in hs.generate(self.data)]
    for s in splits:
        # no slicing at all
        assert_false(s[0].samples.base is self.data.samples)
        assert_false(s[1].samples.base is self.data.samples)
    nfs = NFoldPartitioner()
    spl = Splitter(attr='partitions')
    splits = [list(spl.generate(p)) for p in nfs.generate(self.data)]
    for i, s in enumerate(splits):
        # the training part is a contiguous selection only for the first
        # and the last split; otherwise fancy indexing makes a copy
        if i == 0 or i == len(splits) - 1:
            assert_true(is_the_same_base(s[0].samples))
        else:
            assert_true(s[0].samples.base is None)
        # we get slicing all the time
        assert_true(is_the_same_base(s[1].samples))

    step_ds = Dataset(np.random.randn(20, 2),
                      sa={'chunks': np.tile([0, 1], 10)})
    oes = OddEvenPartitioner()
    spl = Splitter(attr='partitions')
    splits = list(oes.generate(step_ds))
    for s in splits:
        # partitioned dataset shares the data
        assert_true(s.samples.base is step_ds.samples)
    splits = [list(spl.generate(p)) for p in oes.generate(step_ds)]
    assert_equal(len(splits), 2)
    for s in splits:
        # we get slicing all the time; use the version-aware check here
        # too, since .base.base would break on newer numpy
        assert_true(is_the_same_base(s[0].samples, step_ds.samples))
        assert_true(is_the_same_base(s[1].samples, step_ds.samples))
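The version check at the top of this example exists because numpy 1.7 changed how views of views record their origin: the chain is collapsed, so .base points straight at the original array instead of at the intermediate view. A quick standalone check of that behavior:

import numpy as np

a = np.arange(5)
b = a[:4][:3]            # a view of a view of a
print(b.base is a)       # True on numpy >= 1.7: the view chain is collapsed
print(b.base.base is a)  # the pre-1.7 relationship; b.base.base is now None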
Example 15: test_slicing
def test_slicing(self):
    hs = HalfPartitioner()
    spl = Splitter(attr='partitions')
    splits = list(hs.generate(self.data))
    for s in splits:
        # partitioned dataset shares the data
        assert_true(s.samples.base is self.data.samples)
    splits = [list(spl.generate(p)) for p in hs.generate(self.data)]
    for s in splits:
        # we get slicing all the time
        assert_true(s[0].samples.base.base is self.data.samples)
        assert_true(s[1].samples.base.base is self.data.samples)
    spl = Splitter(attr='partitions', noslicing=True)
    splits = [list(spl.generate(p)) for p in hs.generate(self.data)]
    for s in splits:
        # no slicing at all
        assert_false(s[0].samples.base is self.data.samples)
        assert_false(s[1].samples.base is self.data.samples)
    nfs = NFoldPartitioner()
    spl = Splitter(attr='partitions')
    splits = [list(spl.generate(p)) for p in nfs.generate(self.data)]
    for i, s in enumerate(splits):
        # the training part is a contiguous selection only for the first
        # and the last split
        if i == 0 or i == len(splits) - 1:
            assert_true(s[0].samples.base.base is self.data.samples)
        else:
            assert_true(s[0].samples.base is None)
        # we get slicing all the time
        assert_true(s[1].samples.base.base is self.data.samples)

    step_ds = Dataset(np.random.randn(20, 2),
                      sa={'chunks': np.tile([0, 1], 10)})
    oes = OddEvenPartitioner()
    spl = Splitter(attr='partitions')
    splits = list(oes.generate(step_ds))
    for s in splits:
        # partitioned dataset shares the data
        assert_true(s.samples.base is step_ds.samples)
    splits = [list(spl.generate(p)) for p in oes.generate(step_ds)]
    assert_equal(len(splits), 2)
    for s in splits:
        # we get slicing all the time
        assert_true(s[0].samples.base.base is step_ds.samples)
        assert_true(s[1].samples.base.base is step_ds.samples)