本文整理汇总了Python中mvpa2.datasets.base.Dataset.from_wizard方法的典型用法代码示例。如果您正苦于以下问题:Python Dataset.from_wizard方法的具体用法?Python Dataset.from_wizard怎么用?Python Dataset.from_wizard使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mvpa2.datasets.base.Dataset
的用法示例。
在下文中一共展示了Dataset.from_wizard方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_stack_add_attributes
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_stack_add_attributes():
    """Stacking must carry both attributes into the stacked-along
    collection and honor the fa=/sa= merge policy for the other one."""
    ds_a = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    ds_b = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    # 'ok' is identical in both datasets, 'nok' differs
    ds_a.fa['ok'] = ds_a.sa['ok'] = np.arange(5)
    ds_b.fa['ok'] = ds_b.sa['ok'] = np.arange(5)
    ds_a.fa['nok'] = ds_a.sa['nok'] = [0]
    ds_b.fa['nok'] = ds_b.sa['nok'] = np.arange(5)
    # (stacking function, collection controlled by the keyword, the other one)
    scenarios = ((vstack, 'fa', 'sa'),
                 (hstack, 'sa', 'fa'))
    for stacker, coll_name, other_name in scenarios:
        for mode in (None, 'update', 'drop_nonunique'):
            kwargs = {coll_name: mode} if mode else {}
            stacked = stacker((ds_a, ds_b), **kwargs)
            coll = getattr(stacked, coll_name)
            other = getattr(stacked, other_name)
            # regardless of the mode, the stacked-along collection gets
            # both attributes concatenated
            assert_array_equal(other['nok'].value, [0] * 5 + range(5))
            assert_array_equal(other['ok'].value, range(5) * 2)
            if mode == 'update':
                # the last dataset's values win
                assert_array_equal(coll['nok'].value,
                                   getattr(ds_b, coll_name)['nok'].value)
                assert_array_equal(coll['ok'].value,
                                   getattr(ds_b, coll_name)['ok'].value)
            else:
                # None and 'drop_nonunique': conflicting attribute is dropped
                assert('nok' not in coll)
                # identical in both inputs, so it must survive unchanged
                assert_array_equal(coll['ok'].value,
                                   getattr(ds_a, coll_name)['ok'].value)
                assert_array_equal(coll['ok'].value,
                                   getattr(ds_b, coll_name)['ok'].value)
示例2: test_mergeds
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_mergeds():
    """Check sample-wise (append/vstack) and feature-wise (hstack) merging.

    Verifies attribute-compatibility checks and the shapes/attribute values
    of the merged datasets.
    """
    data0 = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    data0.fa['one'] = np.ones(5)
    data1 = Dataset.from_wizard(np.ones((5, 5)), targets=1, chunks=1)
    data1.fa['one'] = np.zeros(5)
    data2 = Dataset.from_wizard(np.ones((3, 5)), targets=2, chunks=1)
    data3 = Dataset.from_wizard(np.ones((4, 5)), targets=2)
    data4 = Dataset.from_wizard(np.ones((2, 5)), targets=3, chunks=2)
    data4.fa['test'] = np.arange(5)
    # cannot merge if there are attributes missing in one of the datasets
    # (data0 was created without the 'chunks' sample attribute data1 has)
    assert_raises(DatasetError, data1.append, data0)
    merged = data1.copy()
    merged.append(data2)
    ok_( merged.nfeatures == 5 )
    l12 = [1]*5 + [2]*3   # expected targets: 5x target-1 then 3x target-2
    l1 = [1]*8            # expected chunks: both datasets used chunks=1
    ok_((merged.targets == l12).all())
    ok_((merged.chunks == l1).all())
    # same merge again via append on a fresh copy
    data_append = data1.copy()
    data_append.append(data2)
    ok_(data_append.nfeatures == 5)
    ok_((data_append.targets == l12).all())
    ok_((data_append.chunks == l1).all())
    #
    # appending
    #
    # we need the same samples attributes in both datasets
    assert_raises(DatasetError, data2.append, data3)
    #
    # vstacking
    #
    if __debug__:
        # tested only in __debug__: data0/data3 lack 'chunks', so the
        # attribute sets are inconsistent across the sequence
        assert_raises(ValueError, vstack, (data0, data1, data2, data3))
    datasets = (data1, data2, data4)
    merged = vstack(datasets)
    assert_equal(merged.shape,
                 (np.sum([len(ds) for ds in datasets]), data1.nfeatures))
    assert_true('test' in merged.fa)
    assert_array_equal(merged.sa.targets, [1]*5 + [2]*3 + [3]*2)
    #
    # hstacking
    #
    # feature-wise stacking of datasets with unequal sample counts must fail
    assert_raises(ValueError, hstack, datasets)
    datasets = (data0, data1)
    merged = hstack(datasets)
    assert_equal(merged.shape,
                 (len(data1), np.sum([ds.nfeatures for ds in datasets])))
    assert_true('chunks' in merged.sa)
    assert_array_equal(merged.fa.one, [1]*5 + [0]*5)
示例3: test_stack_add_dataset_attributes
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_stack_add_dataset_attributes():
    """Check handling of dataset (.a) attributes by the ``a=`` argument
    of vstack/hstack: integer index, None, 'unique', 'uniques', 'all',
    and 'drop_nonunique'.
    """
    data0 = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    data0.a['one'] = np.ones(2)
    data0.a['two'] = 2
    data0.a['three'] = 'three'
    data0.a['common'] = range(10)
    data0.a['array'] = np.arange(10)
    data1 = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    data1.a['one'] = np.ones(3)          # differs from data0's 'one'
    data1.a['two'] = 3                   # differs from data0's 'two'
    data1.a['four'] = 'four'             # only in data1
    data1.a['common'] = range(10)        # identical in both
    data1.a['array'] = np.arange(10)     # identical in both
    vstacker = lambda x: vstack((data0, data1), a=x)
    hstacker = lambda x: hstack((data0, data1), a=x)
    add_params = (1, None, 'unique', 'uniques', 'all', 'drop_nonunique')
    for stacker in (vstacker, hstacker):
        for add_param in add_params:
            if add_param == 'unique':
                # 'unique' must raise because 'one'/'two' conflict
                assert_raises(DatasetError, stacker, add_param)
                continue
            r = stacker(add_param)
            if add_param == 1:
                # integer picks the attributes of that dataset (data1 here)
                assert_array_equal(data1.a.one, r.a.one)
                assert_equal(r.a.two, 3)
                assert_equal(r.a.four, 'four')
                assert_true('three' not in r.a.keys())
                assert_true('array' in r.a.keys())
            elif add_param == 'uniques':
                # union of keys; values are tuples of the unique values
                assert_equal(set(r.a.keys()),
                             set(['one', 'two', 'three',
                                  'four', 'common', 'array']))
                assert_equal(r.a.two, (2, 3))
                assert_equal(r.a.four, ('four',))
            elif add_param == 'all':
                # one entry per dataset, None where an attribute is absent
                assert_equal(set(r.a.keys()),
                             set(['one', 'two', 'three',
                                  'four', 'common', 'array']))
                assert_equal(r.a.two, (2, 3))
                assert_equal(r.a.three, ('three', None))
            elif add_param == 'drop_nonunique':
                # attributes with conflicting values are dropped entirely
                assert_equal(set(r.a.keys()),
                             set(['common', 'three', 'four', 'array']))
                assert_equal(r.a.three, 'three')
                assert_equal(r.a.four, 'four')
                assert_equal(r.a.common, range(10))
                assert_array_equal(r.a.array, np.arange(10))
示例4: test_labelpermutation_randomsampling
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_labelpermutation_randomsampling():
    """Check per-target sample counts and balanced random sampling on a
    dataset built from five appended chunks (one sample per target each)."""
    ds = Dataset.from_wizard(np.ones((5, 10)), targets=range(5), chunks=1)
    for i in xrange(1, 5):
        ds.append(Dataset.from_wizard(np.ones((5, 10)) + i,
                                      targets=range(5), chunks=i+1))
    # assign some feature attributes
    ds.fa['roi'] = np.repeat(np.arange(5), 2)
    ds.fa['lucky'] = np.arange(10)%2
    # use subclass for testing if it would survive
    ds.samples = ds.samples.view(myarray)
    # 5 chunks x 1 sample per target -> 5 samples for each of the 5 targets
    ok_(ds.get_nsamples_per_attr('targets') == {0:5, 1:5, 2:5, 3:5, 4:5})
    # random_samples(2) must draw a balanced 2 samples per target
    sample = ds.random_samples(2)
    # NOTE(review): comparing dict.values() to a list relies on value
    # ordering (Python 2 semantics here) — fragile; verify on the target
    # interpreter
    ok_(sample.get_nsamples_per_attr('targets').values() == [ 2, 2, 2, 2, 2 ])
    ok_((ds.sa['chunks'].unique == range(1, 6)).all())
示例5: test_feature_masking
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_feature_masking():
    """A boolean mask handed to from_wizard must select exactly the
    flagged features, and the attribute shortcuts must mirror the
    underlying collections."""
    msk = np.zeros((5, 3), dtype='bool')
    msk[2, 1] = msk[4, 0] = True
    ds = Dataset.from_wizard(np.arange(60).reshape((4, 5, 3)),
                             targets=1, chunks=1, mask=msk)
    # only the two flagged locations survive
    ok_(ds.nfeatures == 2)
    # applying the same mask again must not change anything
    ok_(ds[:, msk].nfeatures == ds.nfeatures)
    # feature 1 corresponds to input element (4, 0): values s*15 + 12
    assert_array_equal(ds[:, 1].samples[:, 0], [12, 27, 42, 57])
    # XXX put back when coord -> fattr is implemented
    #ok_(tuple(ds[:, 1].a.mapper.getInId(0)) == (4, 0))
    ok_(ds[:, 1].a.mapper.forward1(msk).shape == (1,))
    # shortcut attribute access must alias the real attributes
    # XXX put me back
    #self.failUnless(np.all(ds.I == ds.origids))
    assert_array_equal(ds.C, ds.chunks)
    assert_array_equal(ds.UC, np.unique(ds.chunks))
    assert_array_equal(ds.T, ds.targets)
    assert_array_equal(ds.UT, np.unique(ds.targets))
    assert_array_equal(ds.S, ds.samples)
    assert_array_equal(ds.O, ds.mapper.reverse(ds.samples))
示例6: test_labelschunks_access
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_labelschunks_access():
    """targets/chunks properties must alias the sa collection entries,
    and set_attr must broadcast length-1 iterables but reject scalars."""
    arr = np.arange(12).reshape((4, 3)).view(myarray)
    init_targets = range(4)
    chunk_ids = [1, 1, 2, 2]
    ds = Dataset.from_wizard(arr, init_targets, chunk_ids)
    # the array subclass must survive wrapping
    ok_(isinstance(ds.samples, myarray))
    assert_array_equal(ds.targets, init_targets)
    assert_array_equal(ds.chunks, chunk_ids)
    # the properties are the very objects stored in the collection
    ok_(ds.targets is ds.sa.targets)
    ok_(ds.targets is ds.sa['targets'].value)
    ok_(ds.chunks is ds.sa.chunks)
    ok_(ds.chunks is ds.sa['chunks'].value)
    # top-level assignment must propagate down into the collection
    ds.targets = chunk_ids
    assert_array_equal(ds.targets, chunk_ids)
    ok_(ds.targets is ds.sa.targets)
    ok_(ds.targets is ds.sa['targets'].value)
    # broadcasting via set_attr: a bare scalar is rejected...
    assert_raises(ValueError, ds.set_attr, 'sa.bc', 5)
    # ...and so is a bare string
    assert_raises(TypeError, ds.set_attr, 'sa.bc', "mike")
    # ...but any length-1 iterable is replicated across all samples
    ds.set_attr('sa.bc', (5,))
    ds.set_attr('sa.dc', ["mike"])
    assert_array_equal(ds.sa.bc, [5] * len(ds))
    assert_array_equal(ds.sa.dc, ["mike"] * len(ds))
示例7: test_origmask_extraction
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_origmask_extraction():
    """Selecting a single feature from an unmasked 4D dataset must yield
    a one-column sample matrix."""
    volume = np.random.standard_normal((10, 2, 4, 3))
    ds = Dataset.from_wizard(volume, targets=2, chunks=2)
    picked = ds[:, 5]
    ok_(picked.samples.shape[1] == 1)
示例8: test_multidim_attrs
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_multidim_attrs():
    """Sample attributes may themselves be multi-dimensional; feature
    attributes must carry #samples on their first axis to be rejected
    and #features to be accepted."""
    arr = np.arange(24).reshape(2, 3, 4)
    # two samples mapped from 2d into 1d, with 2d targets and 3d chunks
    ds = Dataset.from_wizard(arr.copy(),
                             targets=arr.copy(),
                             chunks=np.random.normal(size=(2,10,4,2)))
    assert_equal(ds.nsamples, 2)
    assert_equal(ds.nfeatures, 12)
    assert_equal(ds.sa.targets.shape, (2, 3, 4))
    assert_equal(ds.sa.chunks.shape, (2, 10, 4, 2))
    # slicing keeps the trailing attribute dimensions intact
    first = ds[0]
    assert_equal(first.nsamples, 1)
    assert_equal(first.nfeatures, 12)
    assert_equal(first.sa.targets.shape, (1, 3, 4))
    assert_equal(first.sa.chunks.shape, (1, 10, 4, 2))
    # forward-mapping the input yields (#samples, #features)
    mapped = ds.mapper.forward(arr)
    assert_equal(mapped.shape, (2, 12))
    # must puke -- the first axis is #samples, not #features
    assert_raises(ValueError, ds.fa.__setitem__, 'moresamples', mapped)
    # transposed it is (#features, ...) and therefore fine
    ds.fa['moresamples'] = mapped.T
    assert_equal(ds.fa.moresamples.shape, (12, 2))
示例9: get_data
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def get_data(self):
    """Build a random dataset: 100 samples of shape (2, 2, 2), split into
    two target classes of 50, with the same 5-chunk layout per class."""
    samples = np.random.standard_normal((100, 2, 2, 2))
    targets = np.repeat([0, 1], 50)
    chunks = np.tile(np.repeat(range(5), 10), 2)
    return Dataset.from_wizard(samples=samples, targets=targets, chunks=chunks)
示例10: test_samples_shape
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_samples_shape():
    """Multi-dimensional samples are flattened; 1D input becomes a
    single-feature column."""
    ds = Dataset.from_wizard(np.ones((10, 2, 3, 4)), targets=1, chunks=1)
    ok_(ds.samples.shape == (10, 24))
    # a plain 1D array must be promoted to shape (n, 1)
    one_d = Dataset(np.arange(5))
    assert_equal(one_d.shape, (5, 1))
    assert_equal(one_d.nfeatures, 1)
示例11: test_ex_from_masked
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_ex_from_masked():
    """Exercise from_wizard on masked-style data: wrapping a 1D sequence,
    appending compatible and incompatible datasets, and validating
    attribute lengths."""
    ds = Dataset.from_wizard(samples=np.atleast_2d(np.arange(5)).view(myarray),
                             targets=1, chunks=1)
    # simple sequence has to be a single pattern
    assert_equal(ds.nsamples, 1)
    # array subclass survives
    ok_(isinstance(ds.samples, myarray))
    # check correct pattern layout (1x5)
    assert_array_equal(ds.samples, [[0, 1, 2, 3, 4]])
    # check for single label and origin
    assert_array_equal(ds.targets, [1])
    assert_array_equal(ds.chunks, [1])
    # now try adding pattern with wrong shape (3 features instead of 5)
    assert_raises(DatasetError, ds.append,
                  Dataset.from_wizard(np.ones((2,3)), targets=1, chunks=1))
    # now add two real patterns
    ds.append(Dataset.from_wizard(np.random.standard_normal((2, 5)),
                                  targets=2, chunks=2))
    assert_equal(ds.nsamples, 3)
    assert_array_equal(ds.targets, [1, 2, 2])
    assert_array_equal(ds.chunks, [1, 2, 2])
    # test unique class labels
    ds.append(Dataset.from_wizard(np.random.standard_normal((2, 5)),
                                  targets=3, chunks=5))
    assert_array_equal(ds.sa['targets'].unique, [1, 2, 3])
    # test wrong attributes length (3 targets for 4 samples)
    assert_raises(ValueError, Dataset.from_wizard,
                  np.random.standard_normal((4,2,3,4)), targets=[1, 2, 3],
                  chunks=2)
    # and 3 chunks for 4 samples
    assert_raises(ValueError, Dataset.from_wizard,
                  np.random.standard_normal((4,2,3,4)), targets=[1, 2, 3, 4],
                  chunks=[2, 2, 2])
    # now test one that is using from_masked
    # NOTE(review): 'datasets' is a module-level fixture collection defined
    # elsewhere in the test suite
    ds = datasets['3dlarge']
    # every sample/feature attribute must match the dataset's dimensions
    for a in ds.sa:
        assert_equal(len(ds.sa[a].value), len(ds))
    for a in ds.fa:
        assert_equal(len(ds.fa[a].value), ds.nfeatures)
示例12: test_shape_conversion
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_shape_conversion():
    """from_wizard must flatten each sample while keeping the array
    subclass of the input."""
    ds = Dataset.from_wizard(np.arange(24).reshape((2, 3, 4)).view(myarray),
                             targets=1, chunks=1)
    # subclass survives the wrapping
    ok_(isinstance(ds.samples, myarray))
    assert_equal(ds.nsamples, 2)
    # each (3, 4) sample is flattened to 12 features
    assert_equal(ds.samples.shape, (2, 12))
    assert_array_equal(ds.samples, [range(12), range(12, 24)])
示例13: setUp
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def setUp(self):
    """Create the test fixture: a random masked dataset with two of the
    24 voxels masked out, two target classes, and five chunks per class."""
    samples = np.random.standard_normal((100, 3, 4, 2))
    targets = np.repeat([0, 1], 50)
    chunks = np.tile(np.repeat(range(5), 10), 2)
    # knock two locations out of the full mask
    mask = np.ones((3, 4, 2), dtype="bool")
    mask[0, 0, 0] = 0
    mask[1, 3, 1] = 0
    self.dataset = Dataset.from_wizard(samples=samples, targets=targets,
                                       chunks=chunks, mask=mask)
示例14: test_basic_datamapping
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def test_basic_datamapping():
    """from_wizard must attach a flattening mapper to the dataset and
    preserve the samples' array subclass."""
    samples = np.arange(24).reshape((4, 3, 2)).view(myarray)
    ds = Dataset.from_wizard(samples)
    # array subclass survives
    ok_(isinstance(ds.samples, myarray))
    # mapper should end up in the dataset
    # ('in' instead of the deprecated dict.has_key, which was removed in
    # Python 3; equivalent on Python 2 as well)
    ok_('mapper' in ds.a)
    # check correct mapping: 4 samples, each 3x2 volume flattened to 6 features
    ok_(ds.nsamples == 4)
    ok_(ds.nfeatures == 6)
示例15: load_mat_ds
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import from_wizard [as 别名]
def load_mat_ds(path, subj, folder, **kwargs):
    """Load a subject's MATLAB data plus attributes into a PyMVPA Dataset.

    Parameters
    ----------
    path, subj, folder : location pieces forwarded to the project loaders.
    **kwargs : passed through to both ``load_mat_data`` and
        ``load_attributes``.

    Returns
    -------
    Dataset with subject name and attributes attached, a constant
    per-feature 'matrix_values' attribute, and 'chunks' derived from the
    label-encoded 'name' sample attribute.
    """
    data = load_mat_data(path, subj, folder, **kwargs)
    # load attributes
    attr = load_attributes(path, subj, folder, **kwargs)
    # reconcile attributes with the data shape; also yields ROI labels
    attr, labels = edit_attr(attr, data.shape)
    ds = Dataset.from_wizard(data, attr.targets)
    ds = add_subjectname(ds, subj)
    ds = add_attributes(ds, attr)
    #ds.fa['roi_labels'] = labels
    # constant flag shaped like a single sample
    ds.fa['matrix_values'] = np.ones_like(data[0])
    # NOTE(review): LabelEncoder is fed the collectable ds.sa['name'] itself,
    # not ds.sa['name'].value — confirm this is intended
    ds.sa['chunks'] = LabelEncoder().fit_transform(ds.sa['name'])
    return ds