This article collects and summarizes typical usage examples of the Python method mvpa2.datasets.base.Dataset.sa['biases']. If you are wondering what Dataset.sa['biases'] does and how it is used in practice, the curated code examples below should help. You can also read more about the containing class, mvpa2.datasets.base.Dataset.
The following shows 3 code examples of Dataset.sa['biases'], sorted by popularity by default.
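All three examples follow the same pattern: build a Dataset of weight vectors and attach the matching bias terms through the sample-attributes collection sa. Here is a minimal, self-contained sketch of that pattern; the weight values and the bias below are made up for illustration.

import numpy as np
from mvpa2.datasets.base import Dataset

# one row of hypothetical per-feature weights -> a dataset with a single sample
weights = np.atleast_2d([0.2, -0.1, 0.7, 0.0])
ds = Dataset(weights)
# sample attributes must match the number of samples (rows), hence a length-1 array
ds.sa['biases'] = np.atleast_1d(-0.5)
print(ds.sa['biases'].value)   # -> array([-0.5])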
Example 1: _call
# Required module import: from mvpa2.datasets.base import Dataset [as alias]
# Or alternatively: from mvpa2.datasets.base.Dataset import sa['biases'] [as alias]
def _call(self, dataset=None):
    """Extract weights from SMLR classifier.

    SMLR always has weights available, so nothing has to be computed here.
    """
    clf = self.clf
    # transpose to have the number of features on the second axis
    # (as usual)
    weights = clf.weights.T

    if __debug__:
        debug('SMLR',
              "Extracting weights for %d-class SMLR" %
              (len(weights) + 1) +
              "Result: min=%f max=%f" %
              (np.min(weights), np.max(weights)))

    # limit the labels to the number of sensitivity sets, to deal
    # with the case of `fit_all_weights=False`
    ds = Dataset(weights,
                 sa={clf.get_space(): clf._ulabels[:len(weights)]})

    if clf.params.has_bias:
        ds.sa['biases'] = clf.biases

    return ds
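The _call above runs when the SMLR classifier's sensitivity analyzer is invoked. Reading the biases back out of the returned dataset could look like the following sketch; it assumes an SMLR classifier clf with has_bias enabled and a labeled dataset ds, and the variable names are illustrative only.

sens_analyzer = clf.get_sensitivity_analyzer()
sens = sens_analyzer(ds)            # ends up calling the _call() shown above
if 'biases' in sens.sa:             # only present when the classifier fits biases
    print(sens.sa['biases'].value)  # one bias per weight vector (row)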
Example 2: _call
# Required module import: from mvpa2.datasets.base import Dataset [as alias]
# Or alternatively: from mvpa2.datasets.base.Dataset import sa['biases'] [as alias]
def _call(self, dataset):
    # XXX Hm... it might make sense to unify access functions
    # naming across our swig libsvm wrapper and sg access
    # functions for svm
    clf = self.clf
    sgsvm = clf.svm
    sens_labels = None
    if isinstance(sgsvm, shogun.Classifier.MultiClassSVM):
        sens, biases = [], []
        nsvms = sgsvm.get_num_svms()
        clabels = sorted(clf._attrmap.values())
        nclabels = len(clabels)
        sens_labels = []
        isvm = 0                        # index for svm among known
        for i in xrange(nclabels):
            for j in xrange(i + 1, nclabels):
                sgsvmi = sgsvm.get_svm(isvm)
                labels_tuple = (clabels[i], clabels[j])
                # Since we gave the labels in incremental order,
                # we always should be right - but it does not
                # hurt to check if set of labels is the same
                if __debug__ and _shogun_exposes_slavesvm_labels:
                    if not sgsvmi.get_labels():
                        # We need to call classify() so labels get assigned
                        # to the multiclass SVM
                        sgsvm.classify()
                    assert(set([sgsvmi.get_label(int(x))
                                for x in sgsvmi.get_support_vectors()])
                           == set(labels_tuple))
                sens1, bias = self.__sg_helper(sgsvmi)
                sens.append(sens1)
                biases.append(bias)
                sens_labels += [labels_tuple[::-1]]  # ??? positive first
                isvm += 1
        assert(len(sens) == nsvms)      # we should have covered all
    else:
        sens1, bias = self.__sg_helper(sgsvm)
        biases = np.atleast_1d(bias)
        sens = np.atleast_2d(sens1)
        if not clf.__is_regression__:
            assert(set(clf._attrmap.values()) == set([-1.0, 1.0]))
            assert(sens.shape[0] == 1)
            sens_labels = [(-1.0, 1.0)]

    ds = Dataset(np.atleast_2d(sens))
    if sens_labels is not None:
        if isinstance(sens_labels[0], tuple):
            # Need to have them in array of dtype object
            sens_labels = asobjarray(sens_labels)
        if len(clf._attrmap):
            sens_labels = clf._attrmap.to_literal(sens_labels, recurse=True)
        ds.sa[clf.get_space()] = sens_labels
    ds.sa['biases'] = biases

    return ds
Example 3: _call
# Required module import: from mvpa2.datasets.base import Dataset [as alias]
# Or alternatively: from mvpa2.datasets.base.Dataset import sa['biases'] [as alias]
#......... part of the code omitted here .........
# (the omitted lines include the method header and the `if` statement whose
#  branch the following block belongs to; its matching `else:` appears below)
        # of labels, so we would need to map them back explicitly
        ds_labels = list(dataset.sa[clf.get_space()].unique)  # labels in the dataset
        senses = [None for i in ds_labels]
        # first label is given positive value
        for i, (c, l) in enumerate([(svcoef > 0, lambda x: x),
                                    (svcoef < 0, lambda x: x * -1)]):
            # convert to array, and just take the meaningful dimension
            c_ = c.A[0]
            # NOTE svm_labels are numerical; ds_labels are literal
            senses[ds_labels.index(
                clf._attrmap.to_literal(svm_labels[i]))] = \
                (l(svcoef[:, c_] * svs[c_, :])).A[0]
        weights = np.array(senses)
        sens_labels = svm_labels
    else:
        # XXX yoh: .mean() is effectively
        # averages across "sensitivities" of all paired classifiers (I
        # think). See more info on this topic in svm.py on how sv_coefs
        # are stored
        #
        # First multiply SV coefficients with the actual SVs to get
        # weighted impact of SVs on decision, then for each feature
        # take mean across SVs to get a single weight value
        # per feature
        if nr_class is None or nr_class <= 2:
            # as simple as this
            weights = (svcoef * svs).A
            # and only in case of classification
            if nr_class:
                # ??? First label seems corresponds to positive
                sens_labels = [tuple(svm_labels[::-1])]
        else:
            # we need to compose correctly per each pair of classifiers.
            # See docstring for get_sv_coef for more details on internal
            # structure of bloody storage

            # total # of pairs
            npairs = nr_class * (nr_class - 1) / 2
            # # of SVs in each class
            NSVs_perclass = model.get_n_sv()
            # indices where each class starts in each row of SVs
            # name is after similar variable in libsvm internals
            nz_start = np.cumsum([0] + NSVs_perclass[:-1])
            nz_end = nz_start + NSVs_perclass
            # reserve storage
            weights = np.zeros((npairs, svs.shape[1]))
            ipair = 0                   # index of the pair
            """
            // classifier (i,j): coefficients with
            // i are in sv_coef[j-1][nz_start[i]...],
            // j are in sv_coef[i][nz_start[j]...]
            """
            sens_labels = []
            for i in xrange(nr_class):
                for j in xrange(i + 1, nr_class):
                    weights[ipair, :] = np.asarray(
                        svcoef[j - 1, nz_start[i]:nz_end[i]]
                        * svs[nz_start[i]:nz_end[i]]
                        +
                        svcoef[i, nz_start[j]:nz_end[j]]
                        * svs[nz_start[j]:nz_end[j]]
                        )
                    # ??? First label corresponds to positive
                    # that is why [j], [i]
                    sens_labels += [(svm_labels[j], svm_labels[i])]
                    ipair += 1          # go to the next pair
            assert(ipair == npairs)

    if __debug__ and 'SVM' in debug.active:
        if nr_class:
            nsvs = model.get_n_sv()
        else:
            nsvs = model.get_total_n_sv()
        if clf.__is_regression__:
            svm_type = clf._svm_impl    # type of regression
        else:
            svm_type = '%d-class SVM(%s)' % (nr_class, clf._svm_impl)
        debug('SVM',
              "Extracting weights for %s: #SVs=%s, " %
              (svm_type, nsvs) +
              " SVcoefshape=%s SVs.shape=%s Rhos=%s." %
              (svcoef.shape, svs.shape, rhos) +
              " Result: min=%f max=%f" % (np.min(weights), np.max(weights)))

    ds_kwargs = {}

    if nr_class:                        # for classification only
        # and we should have prepared the labels
        assert(sens_labels is not None)

        if len(clf._attrmap):
            if isinstance(sens_labels[0], tuple):
                sens_labels = asobjarray(sens_labels)
            sens_labels = clf._attrmap.to_literal(sens_labels, recurse=True)

        # NOTE: `weights` is already and always 2D
        ds_kwargs = dict(sa={clf.get_space(): sens_labels})

    weights_ds = Dataset(weights, **ds_kwargs)
    weights_ds.sa['biases'] = rhos
    return weights_ds