This article collects typical usage examples of the deepcopy function from the Python module mvpa2.support.copy. If you have been wondering what exactly deepcopy does, how it is used, and what real calls look like, the curated code examples below may help.
The following presents 15 code examples of the deepcopy function, sorted by popularity by default. You can upvote examples you like or find useful; your feedback helps the system recommend better Python code examples.
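Before the examples, one orienting note: judging from how the excerpts below use it interchangeably with copy.deepcopy, mvpa2.support.copy appears to simply re-export the standard library's deepcopy (this is an assumption, not confirmed by the excerpts themselves). A minimal, self-contained sketch of what deepcopy guarantees:

import copy
import numpy as np

original = {'samples': np.arange(6).reshape(2, 3), 'targets': [1, 1]}
duplicate = copy.deepcopy(original)

duplicate['samples'][0, 0] = 99      # mutate the copy ...
duplicate['targets'].append(2)

assert original['samples'][0, 0] == 0    # ... the original is untouched
assert original['targets'] == [1, 1]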
Example 1: test_mapper_vs_zscore
def test_mapper_vs_zscore():
    """Test by comparing to results of elderly z-score function
    """
    # data: 40 sample feature line in 20d space (40x20; samples x features)
    dss = [
        dataset_wizard(np.concatenate(
            [np.arange(40) for i in range(20)]).reshape(20, -1).T,
            targets=1, chunks=1),
        ] + datasets.values()

    for ds in dss:
        ds1 = deepcopy(ds)
        ds2 = deepcopy(ds)

        zsm = ZScoreMapper(chunks_attr=None)
        assert_raises(RuntimeError, zsm.forward, ds1.samples)
        idhashes = (idhash(ds1), idhash(ds1.samples))
        zsm.train(ds1)
        idhashes_train = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_train)

        # forward dataset
        ds1z_ds = zsm.forward(ds1)
        idhashes_forwardds = (idhash(ds1), idhash(ds1.samples))
        # must not modify samples in place!
        assert_equal(idhashes, idhashes_forwardds)

        # forward samples explicitly
        ds1z = zsm.forward(ds1.samples)
        idhashes_forward = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_forward)

        zscore(ds2, chunks_attr=None)
        assert_array_almost_equal(ds1z, ds2.samples)
        assert_array_equal(ds1.samples, ds.samples)
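The test leans on a simple pattern: deepcopy the dataset, transform the copy, and verify the source never changes. A numpy-only sketch of the same invariant check, with no mvpa2 dependency (zscore_like is a hypothetical stand-in for the mapper):

import copy
import numpy as np

def zscore_like(samples):
    # hypothetical stand-in for ZScoreMapper.forward: returns a new array
    samples = samples.astype(float)
    return (samples - samples.mean(axis=0)) / samples.std(axis=0)

data = np.random.normal(size=(40, 20))
data_before = copy.deepcopy(data)

z = zscore_like(data)
assert np.array_equal(data, data_before)   # input must not be modified
assert np.allclose(z.mean(axis=0), 0)      # per-feature mean removed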
Example 2: test_all_equal
def test_all_equal():
    # all these values are supposed to be different from each other
    # but equal to themselves
    a = np.random.normal(size=(10, 10)) + 1000.
    b = np.zeros((10, 10))
    c = np.zeros(10)
    d = np.zeros(11)
    e = 0
    f = None
    g = True
    h = ''
    i = 'a'
    j = dict(bummer=np.arange(5))

    values = [a, b, c, d, e, f, g, h, i, j]
    for ii, v in enumerate(values):
        for jj, w in enumerate(values):
            # make deepcopy so == operator cannot cheat by checking id()
            assert_equal(all_equal(copy.deepcopy(v),
                                   copy.deepcopy(w)),
                         ii == jj,
                         msg='cmp(%s, %s)' % (type(v), type(w)))

    # ensure that this function behaves like the
    # standard python '==' comparator for singulars
    singulars = [0, None, True, False, '', 1, 'a']
    for v in singulars:
        for w in singulars:
            assert_equal(all_equal(v, w), v == w)
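The deepcopy calls in the nested loop matter: many equality implementations short-circuit when both operands are the same object, so comparing a value against a deep copy of itself forces a genuine element-by-element comparison. A small sketch of the distinction:

import copy
import numpy as np

a = np.arange(5)
b = copy.deepcopy(a)

assert a is not b        # distinct objects ...
assert (a == b).all()    # ... with equal contents
# an equality check that only compared id(v) and id(w) would wrongly
# report a and its deep copy as unequal despite identical contents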
Example 3: clone
def clone(self):
    """Create full copy of the classifier.

    It might require classifier to be untrained first due to
    present SWIG bindings.

    TODO: think about proper re-implementation, without enrollment of deepcopy
    """
    if __debug__:
        debug("CLF", "Cloning %s%s", (self, _strid(self)))
    try:
        return deepcopy(self)
    except:
        self.untrain()
        return deepcopy(self)
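The try/except fallback is a generic pattern for objects holding handles that deepcopy cannot reproduce (here, SWIG bindings of a trained classifier): drop the offending state, then copy again. A hedged sketch with a hypothetical Model class whose "trained" state is an uncopyable lock:

import copy
import threading

class Model(object):
    def __init__(self):
        self._handle = None                # e.g. a SWIG handle after training

    def train(self):
        self._handle = threading.Lock()    # locks cannot be deepcopied

    def untrain(self):
        self._handle = None

    def clone(self):
        try:
            return copy.deepcopy(self)
        except Exception:                  # copying failed: drop state, retry
            self.untrain()
            return copy.deepcopy(self)

m = Model()
m.train()
m_copy = m.clone()                         # succeeds after untrain()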
Example 4: _level3
def _level3(self, datasets):
    params = self.params            # for quicker access ;)
    # create a mapper per dataset
    mappers = [deepcopy(params.alignment) for ds in datasets]

    # key different from level-2; the common space is uniform
    #temp_commonspace = commonspace

    residuals = None
    if self.ca['residual_errors'].enabled:
        residuals = np.zeros((1, len(datasets)))
        self.ca.residual_errors = Dataset(samples=residuals)

    # start from original input datasets again
    for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
        if __debug__:
            debug('HPAL_', "Level 3: ds #%i" % i)

        # retrain mapper on final common space
        ds_new.sa[m.get_space()] = self.commonspace
        m.train(ds_new)
        # remove common space attribute again to save on memory
        del ds_new.sa[m.get_space()]

        if residuals is not None:
            # obtain final projection
            data_mapped = m.forward(ds_new.samples)
            residuals[0, i] = np.linalg.norm(data_mapped - self.commonspace)

    return mappers
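The list comprehension at the top is the key deepcopy idiom here: every dataset gets its own independent mapper built from the same template. Reusing one object (or writing [template] * n) would alias a single mapper across all datasets. A minimal sketch of the pitfall:

import copy

template = {'trained_on': None}            # stand-in for params.alignment

aliased = [template] * 3                   # three references, one object
independent = [copy.deepcopy(template) for _ in range(3)]

aliased[0]['trained_on'] = 'ds0'
independent[0]['trained_on'] = 'ds0'

assert aliased[1]['trained_on'] == 'ds0'       # aliasing leaks state
assert independent[1]['trained_on'] is None    # copies stay independent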
Example 5: _forward_data
def _forward_data(self, data):
    if self.__chunks_attr is not None:
        raise RuntimeError(
            "%s cannot do chunk-wise Z-scoring of plain data "
            "since it has to be parameterized with chunks_attr." % self)
    if self.__param_est is not None:
        raise RuntimeError("%s cannot do Z-scoring with estimating "
                           "parameters on some attributes of plain "
                           "data." % self)

    params = self.__params_dict
    if params is None:
        raise RuntimeError(
            "ZScoreMapper needs to be trained before call to forward")

    # mappers should not modify the input data
    # cast the data to float, since in-place operations below do not upcast!
    if np.issubdtype(data.dtype, np.integer):
        if self._secret_inplace_zscore:
            raise TypeError(
                "Cannot perform inplace z-scoring since data is of integer "
                "type. Please convert to float before calling zscore")
        mdata = data.astype(self.__dtype)
    elif self._secret_inplace_zscore:
        mdata = data
    else:
        # do not call .copy() directly, since it might not be an array
        mdata = copy.deepcopy(data)

    self._zscore(mdata, *params['__all__'])
    return mdata
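The branching above encodes a general rule: integer input cannot be z-scored in place because the subtraction and division would truncate, so the mapper upcasts a copy instead. A numpy-only sketch of why the upcast is needed (an illustration outside mvpa2):

import numpy as np

data = np.array([1, 2, 3, 4], dtype=int)

# in-place z-scoring of an integer array cannot work:
# data -= data.mean() raises on modern numpy (unsafe float-to-int cast)

mdata = data.astype(float)      # copy + upcast, input left untouched
mdata -= mdata.mean()
mdata /= mdata.std()

assert data.tolist() == [1, 2, 3, 4]
assert abs(mdata.mean()) < 1e-12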
Example 6: _level3
def _level3(self, datasets):
    params = self.params            # for quicker access ;)
    # create a mapper per dataset
    mappers = [deepcopy(params.alignment) for ds in datasets]

    # key different from level-2; the common space is uniform
    #temp_commonspace = commonspace

    # Fixing nproc=0
    if params.nproc == 0:
        from mvpa2.base import warning
        warning("nproc of 0 doesn't make sense. Setting nproc to 1.")
        params.nproc = 1
    # Checking for joblib, if not, set nproc to 1
    if params.nproc != 1:
        from mvpa2.base import externals, warning
        if not externals.exists('joblib'):
            warning("Setting nproc different from 1 requires joblib package, which "
                    "does not seem to exist. Setting nproc to 1.")
            params.nproc = 1

    # start from original input datasets again
    if params.nproc == 1:
        residuals = []
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 3: ds #%i" % i)
            m, residual = get_trained_mapper(ds_new, self.commonspace, m,
                                             self.ca['residual_errors'].enabled)
            if self.ca['residual_errors'].enabled:
                residuals.append(residual)
    else:
        if __debug__:
            debug('HPAL_', "Level 3: Using joblib with nproc = %d " % params.nproc)
        verbose_level_parallel = 20 \
            if (__debug__ and 'HPAL' in debug.active) else 0
        from joblib import Parallel, delayed
        import sys
        # joblib's 'multiprocessing' backend has known issues of failure on OSX
        # Tested with MacOS 10.12.13, python 2.7.13, joblib v0.10.3
        if params.joblib_backend is None:
            params.joblib_backend = 'threading' if sys.platform == 'darwin' \
                                    else 'multiprocessing'
        res = Parallel(
            n_jobs=params.nproc, pre_dispatch=params.nproc,
            backend=params.joblib_backend,
            verbose=verbose_level_parallel
        )(
            delayed(get_trained_mapper)
            (ds, self.commonspace, mapper, self.ca['residual_errors'].enabled)
            for ds, mapper in zip(datasets, mappers)
        )
        mappers = [m for m, r in res]
        if self.ca['residual_errors'].enabled:
            residuals = [r for m, r in res]

    if self.ca['residual_errors'].enabled:
        self.ca.residual_errors = Dataset(samples=np.array(residuals)[None, :])

    return mappers
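In the parallel branch, each (dataset, mapper) pair travels to a separate worker, so the per-dataset deepcopy of the alignment template is what keeps workers from sharing mutable state. A stripped-down sketch of the same dispatch shape, assuming joblib is installed (train_one is a hypothetical stand-in for get_trained_mapper):

import copy
from joblib import Parallel, delayed

def train_one(ds, mapper):
    # hypothetical worker: returns an updated mapper and a residual
    mapper = dict(mapper, trained_on=ds)
    return mapper, 0.0

datasets = ['ds0', 'ds1', 'ds2']
template = {'trained_on': None}
mappers = [copy.deepcopy(template) for _ in datasets]

res = Parallel(n_jobs=2, backend='threading')(
    delayed(train_one)(ds, m) for ds, m in zip(datasets, mappers))

mappers = [m for m, r in res]
residuals = [r for m, r in res]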
Example 7: setUp
def setUp(self):
    self.backup = []
    # paranoid check
    self.cfgstr = str(cfg)
    # clean up externals cfg for proper testing
    if cfg.has_section('externals'):
        self.backup = copy.deepcopy(cfg.items('externals'))
        cfg.remove_section('externals')
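This setUp snapshots mutable configuration before the tests dirty it; a matching tearDown would write the backup back. A minimal sketch of the same save/mutate/restore cycle, with a plain dict standing in for PyMVPA's cfg object:

import copy

cfg = {'externals': {'have scipy': 'yes'}, 'general': {'verbose': '1'}}

backup = copy.deepcopy(cfg.get('externals', {}))   # snapshot before mutation
cfg.pop('externals', None)
# ... run tests that freely rewrite cfg ...
cfg['externals'] = backup                          # restore in tearDown

assert cfg['externals'] == {'have scipy': 'yes'}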
Example 8: test_deep_copying_state_variable
def test_deep_copying_state_variable(self):
    for v in (True, False):
        sv = ConditionalAttribute(enabled=v,
                                  doc="Testing")
        sv.enabled = not v
        sv_dc = copy.deepcopy(sv)
        self.failUnlessEqual(sv.enabled, sv_dc.enabled)
        self.failUnlessEqual(sv.name, sv_dc.name)
        self.failUnlessEqual(sv._instance_index, sv_dc._instance_index)
Example 9: test_deep_copying_state_variable
def test_deep_copying_state_variable(self):
    for v in (True, False):
        sv = ConditionalAttribute(enabled=v, doc="Testing")
        sv.enabled = not v
        sv_dc = copy.deepcopy(sv)
        if not (__debug__ and "ENFORCE_CA_ENABLED" in debug.active):
            self.assertEqual(sv.enabled, sv_dc.enabled)
        self.assertEqual(sv.name, sv_dc.name)
        self.assertEqual(sv._instance_index, sv_dc._instance_index)
Example 10: select_samples
def select_samples(self, selection):
    """Return new ColumnData with selected samples"""
    data = copy.deepcopy(self)
    for k, v in data.iteritems():
        data[k] = [v[x] for x in selection]
    data._check()
    return data
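deepcopy here clones the whole column container (keys, metadata, and all) before the row selection overwrites each column, so the source object survives intact. A usage sketch with a plain dict of columns (select_rows is a hypothetical equivalent):

import copy

def select_rows(columns, selection):
    data = copy.deepcopy(columns)          # clone the container first
    for k, v in data.items():              # then shrink each column
        data[k] = [v[x] for x in selection]
    return data

table = {'subject': ['s1', 's2', 's3'], 'score': [0.9, 0.4, 0.7]}
subset = select_rows(table, [0, 2])

assert subset == {'subject': ['s1', 's3'], 'score': [0.9, 0.7]}
assert table['subject'] == ['s1', 's2', 's3']    # original untouched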
Example 11: test_id_hash
def test_id_hash(self, pair):
    a, b = pair
    a1 = deepcopy(a)
    a_1 = idhash(a)

    self.assertTrue(a_1 == idhash(a), msg="Must be of the same idhash")
    self.assertTrue(a_1 != idhash(b), msg="Must be of different idhash")
    if isinstance(a, np.ndarray):
        self.assertTrue(a_1 != idhash(a.T), msg=".T must be of different idhash")
    if not isinstance(a, tuple):
        self.assertTrue(a_1 != idhash(a1), msg="Must be of different idhash")
        a[2] += 1; a_2 = idhash(a)
        self.assertTrue(a_1 != a_2, msg="Idhash must change")
    else:
        a_2 = a_1
    a = a[2:]; a_3 = idhash(a)
    self.assertTrue(a_2 != a_3, msg="Idhash must change after slicing")
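Unlike id(), idhash is sensitive to content: mutating or slicing an object must change its hash, which is why the test deep-copies a first to obtain an equal-valued but differently-hashed object. A rough sketch of a content-sensitive fingerprint for arrays (an illustration, not PyMVPA's actual idhash implementation):

import hashlib
import numpy as np

def content_hash(arr):
    # fingerprint that tracks content, unlike the stable id()
    return hashlib.md5(np.ascontiguousarray(arr).tobytes()).hexdigest()

a = np.arange(5)
h1 = content_hash(a)
a[2] += 1
h2 = content_hash(a)

assert h1 != h2                       # mutation changes the fingerprint
assert content_hash(a[2:]) != h2      # slicing does too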
Example 12: is_sorted
def is_sorted(items):
    """Check if listed items are in sorted order.

    Parameters
    ----------
    `items`: iterable container

    :return: `True` if were sorted. Otherwise `False` + Warning
    """
    items_sorted = deepcopy(items)
    items_sorted.sort()
    equality = items_sorted == items
    # XXX yarik forgotten analog to isiterable
    if hasattr(equality, '__iter__'):
        equality = np.all(equality)
    return equality
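deepcopy protects the caller here: list.sort() works in place, so sorting a copy leaves the caller's sequence in its original order. A short check (on modern Python, sorted(items) achieves the same without an explicit copy):

from copy import deepcopy

items = [3, 1, 2]
items_sorted = deepcopy(items)
items_sorted.sort()

assert items == [3, 1, 2]          # caller's list untouched
assert items_sorted == [1, 2, 3]
assert items_sorted != items       # hence is_sorted(items) is False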
Example 13: __new__
def __new__(cls, *args, **kwargs):
    """Instantiate ClassWithCollections object
    """
    self = super(ClassWithCollections, cls).__new__(cls)

    s__dict__ = self.__dict__

    # init variable
    # XXX: Added as pylint complained (rightfully) -- not sure if false
    #      is the proper default
    self.__params_set = False

    # need to check to avoid override of enabled ca in the case
    # of multiple inheritance, like both ClassWithCollections and
    # Harvestable
    if '_collections' not in s__dict__:
        s__class__ = self.__class__

        collections = copy.deepcopy(s__class__._collections_template)
        s__dict__['_collections'] = collections
        s__dict__['_known_attribs'] = {}
        """Dictionary to contain 'links' to the collections from each
        known attribute. Is used to gain some speed up in lookup within
        __getattribute__ and __setattr__
        """

        # Assign owner to all collections
        for col, collection in collections.iteritems():
            if col in s__dict__:
                raise ValueError(
                    "Object %s has already attribute %s" % (self, col))
            s__dict__[col] = collection
            collection.name = col

        self.__params_set = False

    if __debug__:
        descr = kwargs.get('descr', None)
        debug("COL", "ClassWithCollections.__new__ was done "
                     "for %s%s with descr=%s",
              (self.__class__.__name__, _strid(self), descr))

    return self
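The deepcopy of _collections_template is the standard way to give every instance its own mutable copy of class-level state; without it, all instances would share, and corrupt, one template. A compact sketch of the pattern:

import copy

class WithTemplate(object):
    _template = {'enabled': [], 'values': {}}    # shared, class-level

    def __init__(self):
        # per-instance deep copy; mutations stay local to this instance
        self.collections = copy.deepcopy(self._template)

a, b = WithTemplate(), WithTemplate()
a.collections['enabled'].append('ca1')

assert b.collections['enabled'] == []            # b unaffected
assert WithTemplate._template['enabled'] == []   # template unaffected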
Example 14: test_generic_tests
def test_generic_tests(self):
    """Test all classifiers for conformant behavior
    """
    for clf_, traindata in \
            [(clfswh['binary'], datasets['dumb2']),
             (clfswh['multiclass'], datasets['dumb'])]:
        traindata_copy = deepcopy(traindata)  # full copy of dataset
        for clf in clf_:
            clf.train(traindata)
            self.assertTrue(
                (traindata.samples == traindata_copy.samples).all(),
                "Training of a classifier shouldn't change original dataset")

            # TODO: enforce uniform return from predict??
            #predicted = clf.predict(traindata.samples)
            #self.assertTrue(isinstance(predicted, np.ndarray))

            # Just simple test that all of them are syntaxed correctly
            self.assertTrue(str(clf) != "")
            self.assertTrue(repr(clf) != "")
Example 15: test_more_svd
def test_more_svd(self):
    pm = SVDMapper()
    # train SVD
    pm.train(self.largefeat)

    # mixing matrix cannot be square
    self.failUnlessEqual(pm.proj.shape, (40, 10))

    # only first singular value significant
    self.failUnless(pm.sv[:1] > 10)
    self.failUnless((pm.sv[1:] < 10).all())

    # now project data into SVD space
    p = pm.forward(self.largefeat)

    # only variance of first component significant
    var = p.var(axis=0)

    # test that only one component has variance
    self.failUnless(var[:1] > 1.0)
    self.failUnless((var[1:] < 0.0001).all())

    # check that the mapped data can be fully recovered by 'reverse()'
    rp = pm.reverse(p)
    self.failUnlessEqual(rp.shape, self.largefeat.shape)
    self.failUnless((np.round(rp) == self.largefeat).all())

    # copy mapper
    pm2 = deepcopy(pm)

    # now make new random data and do forward->reverse check
    data = np.random.normal(size=(98, 40))
    data_f = pm.forward(data)
    self.failUnlessEqual(data_f.shape, (98, 10))
    data_r = pm.reverse(data_f)
    self.failUnlessEqual(data_r.shape, (98, 40))
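The deepcopy near the end checks that a trained mapper survives copying: the copy must behave identically on new data. A numpy-only sketch of the same round-trip property using a plain projection matrix as a hypothetical stand-in for SVDMapper:

import copy
import numpy as np

rng = np.random.RandomState(0)
proj = np.linalg.qr(rng.normal(size=(40, 10)))[0]    # orthonormal columns

pm = {'proj': proj}                  # trained "mapper" state
pm2 = copy.deepcopy(pm)

data = rng.normal(size=(98, 40))
forward = data.dot(pm['proj'])                       # shape (98, 10)
assert np.allclose(forward, data.dot(pm2['proj']))   # copy behaves the same

recovered = forward.dot(pm2['proj'].T)               # reverse projection
assert recovered.shape == (98, 40)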