本文整理汇总了Python中sklearn.covariance.EmpiricalCovariance.fit方法的典型用法代码示例。如果您正苦于以下问题:Python EmpiricalCovariance.fit方法的具体用法?Python EmpiricalCovariance.fit怎么用?Python EmpiricalCovariance.fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.covariance.EmpiricalCovariance
的用法示例。
在下文中一共展示了EmpiricalCovariance.fit方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_suffstat_sk_full
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
def test_suffstat_sk_full():
    """Check the full-covariance sufficient statistic with n_components=1.

    The covariance produced by ``_estimate_gaussian_covariance_full`` must
    match an ``EmpiricalCovariance`` fitted on ``X * sqrt(resp)``.
    """
    rng = np.random.RandomState(0)
    n_samples, n_features = 500, 2

    # special case 1, assuming data is "centered" (zero means)
    X = rng.rand(n_samples, n_features)
    resp = rng.rand(n_samples, 1)
    X_resp = np.sqrt(resp) * X
    nk = np.array([n_samples])
    xk = np.zeros((1, n_features))
    covars_pred = _estimate_gaussian_covariance_full(resp, X, nk, xk, 0)
    ecov = EmpiricalCovariance(assume_centered=True)
    ecov.fit(X_resp)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)

    # special case 2, assuming resp are all ones
    resp = np.ones((n_samples, 1))
    nk = np.array([n_samples])
    # BUG FIX: X.mean() collapses over *all* entries to a scalar; the
    # estimator needs the per-feature mean, i.e. X.mean(axis=0).
    xk = X.mean(axis=0).reshape((1, -1))
    covars_pred = _estimate_gaussian_covariance_full(resp, X, nk, xk, 0)
    ecov = EmpiricalCovariance(assume_centered=False)
    ecov.fit(X)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
示例2: test_covariance
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
def test_covariance():
    """Exercise the Covariance module on a simple dataset."""
    # Fitting on raw data must reproduce the empirical covariance.
    estimator = EmpiricalCovariance()
    estimator.fit(X)
    reference = empirical_covariance(X)
    assert_array_almost_equal(reference, estimator.covariance_, 4)
    # Every supported norm / option must report a vanishing error
    # against the reference matrix.
    for options in ({}, {'norm': 'spectral'}, {'norm': 'frobenius'},
                    {'scaling': False}, {'squared': False}):
        assert_almost_equal(estimator.error_norm(reference, **options), 0)
    # Mahalanobis distances computation test
    distances = estimator.mahalanobis(X)
    assert np.amax(distances) < 250
    assert np.amin(distances) > 50
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    estimator = EmpiricalCovariance()
    estimator.fit(X_1d)
    reference_1d = empirical_covariance(X_1d)
    assert_array_almost_equal(reference_1d, estimator.covariance_, 4)
    assert_almost_equal(estimator.error_norm(reference_1d), 0)
    assert_almost_equal(
        estimator.error_norm(reference_1d, norm='spectral'), 0)
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
示例3: CovEmbedding
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
class CovEmbedding(BaseEstimator, TransformerMixin):
    """Transformer mapping covariance matrices to coefficients on a flat
    space where standard (Euclidean) analyses can be performed.
    """

    def __init__(self, base_estimator=None, kind='tangent'):
        self.base_estimator = base_estimator
        self.kind = kind

    def fit(self, X, y=None):
        """Fit the covariance estimator; for the tangent-space kind also
        derive the whitening operator from the mean covariance."""
        if self.base_estimator is None:
            self.base_estimator_ = EmpiricalCovariance(assume_centered=True)
        else:
            self.base_estimator_ = clone(self.base_estimator)
        if self.kind == 'tangent':
            # Euclidean mean of the covariances, used as an approximation
            # to the geodesic mean on the SPD manifold.
            stacked = my_stack(
                [self.base_estimator_.fit(signals).covariance_
                 for signals in X])
            euclidean_mean = np.mean(stacked, axis=0)
            self.whitening_ = inv_sqrtm(euclidean_mean)
        return self

    def transform(self, X):
        """Apply transform to covariances

        Parameters
        ----------
        covs: list of array
            list of covariance matrices, shape (n_rois, n_rois)

        Returns
        -------
        list of array, transformed covariance matrices,
        shape (n_rois * (n_rois+1)/2,)
        """
        matrices = my_stack(
            [self.base_estimator_.fit(signals).covariance_
             for signals in X])
        n_rois = matrices.shape[-1]
        if self.kind == 'tangent':
            identity = np.identity(n_rois)
            matrices = [self.whitening_.dot(m.dot(self.whitening_)) - identity
                        for m in matrices]
        elif self.kind == 'partial correlation':
            matrices = [cov_to_corr(inv(m)) for m in matrices]
        elif self.kind == 'correlation':
            matrices = [cov_to_corr(m) for m in matrices]
        # flatten each (symmetric) matrix into a vector
        return np.array([sym_to_vec(m) for m in matrices])
示例4: printSciKitCovarianceMatrixs
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
def printSciKitCovarianceMatrixs():
    """Print the empirical covariance matrix of the RST feature matrix.

    NOTE(review): the original author flagged this as broken --
    "ValueError: setting an array element with a sequence." -- presumably
    the transform yields a ragged array; kept for reference.
    """
    xMaker = RSTCovarianceMatrixMaker()
    nums, data, ilabels = getLabeledRSTData(False)
    # attach the target ratio to each instance before fitting
    for i, d in enumerate(data):
        d['ratio'] = ilabels[i]
    xMaker.setInstanceNums(nums)
    xMaker.fit(data)
    X = xMaker.transform(data)
    correlator = EmpiricalCovariance()
    correlator.fit(X)
    # BUG FIX: `print x` is a Python-2-only statement; the function form
    # with a single argument is valid on both Python 2 and 3.
    print(correlator.covariance_)
示例5: CovEmbedding
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
class CovEmbedding(BaseEstimator, TransformerMixin):
    """Transformer mapping covariance matrices onto a flat (vector) space
    in which ordinary statistical analyses can be carried out.
    """

    def __init__(self, cov_estimator=None, kind='tangent'):
        self.cov_estimator = cov_estimator
        self.kind = kind

    def fit(self, X, y=None):
        """Fit the covariance estimator; for the tangent-space kind also
        compute the Frechet mean and the associated whitening operator."""
        if self.cov_estimator is None:
            self.cov_estimator_ = EmpiricalCovariance(assume_centered=True)
        else:
            self.cov_estimator_ = clone(self.cov_estimator)
        if self.kind == 'tangent':
            fitted = [self.cov_estimator_.fit(signals).covariance_
                      for signals in X]
            # Geometric (Frechet) mean on the SPD manifold, then its
            # inverse square root as the whitening operator.
            self.mean_cov_ = spd_mfd.frechet_mean(fitted, max_iter=30,
                                                  tol=1e-7)
            self.whitening_ = spd_mfd.inv_sqrtm(self.mean_cov_)
        return self

    def transform(self, X):
        """Apply transform to covariances

        Parameters
        ----------
        covs: list of array
            list of covariance matrices, shape (n_rois, n_rois)

        Returns
        -------
        list of array, transformed covariance matrices,
        shape (n_rois * (n_rois+1)/2,)
        """
        matrices = spd_mfd.my_stack(
            [self.cov_estimator_.fit(signals).covariance_
             for signals in X])
        if self.kind == 'tangent':
            matrices = [
                spd_mfd.logm(self.whitening_.dot(m).dot(self.whitening_))
                for m in matrices]
        elif self.kind == 'precision':
            matrices = [spd_mfd.inv(m) for m in matrices]
        elif self.kind == 'partial correlation':
            matrices = [prec_to_partial(spd_mfd.inv(m)) for m in matrices]
        elif self.kind == 'correlation':
            matrices = [cov_to_corr(m) for m in matrices]
        else:
            raise ValueError("Unknown connectivity measure.")
        return np.array([sym_to_vec(m) for m in matrices])
示例6: Mahalanobis
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
class Mahalanobis(BaseEstimator):
    """Mahalanobis distance estimator.

    Uses a covariance estimate to compute the Mahalanobis distance of
    observations from the fitted model.

    Parameters
    ----------
    robust : boolean determining whether to use the robust estimator
        based on Minimum Covariance Determinant computation
    """

    def __init__(self, robust=False):
        # Lazy import: only load the estimator class that was requested.
        if robust:
            from sklearn.covariance import MinCovDet as CovarianceEstimator
        else:
            from sklearn.covariance import \
                EmpiricalCovariance as CovarianceEstimator
        self.model = CovarianceEstimator()
        self.cov = None

    def fit(self, X, y=None, **params):
        """Fit the covariance model on the training data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns self.
        """
        self.cov = self.model.fit(X)
        return self

    def score(self, X, y=None):
        """Compute a score from the Mahalanobis distances of observations.

        The provided observations are assumed to be centered; one may want
        to center them with a location estimate first.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The observations whose Mahalanobis distances are computed.

        Returns
        -------
        mahalanobis_distance : array, shape = [n_observations,]
            Negated, 0.33-power-compressed Mahalanobis distances
            (higher is better, as expected of a score).
        """
        centered = X - self.model.location_
        # Unary minus binds looser than **, so this is -(d ** 0.33).
        return -(self.model.mahalanobis(centered) ** 0.33)
示例7: test_suffstat_sk_full
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
def test_suffstat_sk_full():
    """Compare the precision matrix computed from the
    EmpiricalCovariance fitted on X * sqrt(resp) with
    _sufficient_sk_full, n_components=1."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 500, 2

    def check_precisions(covariances):
        # Precisions rebuilt from the Cholesky factors must agree with
        # direct matrix inversion of each covariance.
        chol = _compute_precision_cholesky(covariances, 'full')
        from_chol = np.array([np.dot(c, c.T) for c in chol])
        from_inv = np.array([linalg.inv(cov) for cov in covariances])
        assert_array_almost_equal(from_inv, from_chol)

    # special case 1, assuming data is "centered"
    X = rng.rand(n_samples, n_features)
    resp = rng.rand(n_samples, 1)
    X_resp = np.sqrt(resp) * X
    nk = np.array([n_samples])
    xk = np.zeros((1, n_features))
    covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
    ecov = EmpiricalCovariance(assume_centered=True)
    ecov.fit(X_resp)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
    check_precisions(covars_pred)

    # special case 2, assuming resp are all ones
    resp = np.ones((n_samples, 1))
    nk = np.array([n_samples])
    xk = X.mean(axis=0).reshape((1, -1))
    covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
    ecov = EmpiricalCovariance(assume_centered=False)
    ecov.fit(X)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
    check_precisions(covars_pred)
示例8: test_covariance
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
def test_covariance():
    """Tests Covariance module on a simple dataset.
    """
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    # BUG FIX: removed the stray Python-2 debug `print` statement here --
    # it is a syntax error on Python 3 and tests should not print.
    mahal_dist = cov.mahalanobis(X)
    assert(np.amin(mahal_dist) > 0)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
    # test with one sample (1-d input; only a warning is expected)
    X_1sample = np.arange(5)
    cov = EmpiricalCovariance()
    with warnings.catch_warnings(record=True):
        cov.fit(X_1sample)
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    # NOTE(review): assert_equal on numpy arrays is fragile; a later
    # revision of this test uses assert_array_equal -- confirm imports
    # before switching.
    assert_equal(cov.location_, np.zeros(X.shape[1]))
示例9: detect_bad_channels
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
def detect_bad_channels(inst, pick_types=None, threshold=.2):
    """Flag channels whose covariance profile deviates from the rest.

    Parameters
    ----------
    inst : instance exposing pick_types / _data / ch_names
        Presumably an MNE Raw/Epochs-like object -- confirm with caller.
    pick_types : dict | None
        Channel selection passed to ``inst.pick_types`` (default: mag only).
    threshold : float
        Channels with robust distance below this value are flagged.

    Returns
    -------
    list of str
        Names of the channels considered bad.
    """
    from sklearn.preprocessing import RobustScaler
    from sklearn.covariance import EmpiricalCovariance
    from jr.stats import median_abs_deviation
    if pick_types is None:
        pick_types = dict(meg='mag')
    inst = inst.pick_types(copy=True, **pick_types)
    # channel-by-channel covariance of the recording
    estimator = EmpiricalCovariance()
    estimator.fit(inst._data.T)
    cov = estimator.covariance_
    # center
    cov = RobustScaler().fit_transform(cov).T
    cov /= median_abs_deviation(cov)
    cov -= np.median(cov)
    # compute robust summary metrics per channel
    mu = np.median(cov, axis=0)
    sigma = median_abs_deviation(cov, axis=0)
    mu /= median_abs_deviation(mu)
    sigma /= median_abs_deviation(sigma)
    distance = np.sqrt(mu ** 2 + sigma ** 2)
    # channels whose (mu, sigma) vector sits near the origin are flagged
    flagged = np.where(distance < threshold)[0]
    return [inst.ch_names[idx] for idx in flagged]
示例10: test_covariance
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
def test_covariance():
    # Tests Covariance module on a simple dataset.
    # Fitting on raw data must reproduce the empirical covariance.
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    # every supported norm / option reports a vanishing error
    for options in ({}, {'norm': 'spectral'}, {'norm': 'frobenius'},
                    {'scaling': False}, {'squared': False}):
        assert_almost_equal(cov.error_norm(emp_cov, **options), 0)
    # an unknown norm must be rejected
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    assert_greater(np.amin(cov.mahalanobis(X)), 0)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    emp_cov_1d = empirical_covariance(X_1d)
    assert_array_almost_equal(emp_cov_1d, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov_1d), 0)
    assert_almost_equal(cov.error_norm(emp_cov_1d, norm='spectral'), 0)
    # a single (1 x 5) sample must warn and yield an all-zero covariance
    X_1sample = np.arange(5).reshape(1, 5)
    cov = EmpiricalCovariance()
    assert_warns(UserWarning, cov.fit, X_1sample)
    assert_array_almost_equal(cov.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
    # test centered case: the location estimate is pinned at zero
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
示例11: xrange
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
###### Likelyhood Computation ######
# Fold the angles in params into proper range, such that
# they centered at the mean.
# NOTE(review): Python 2 code (xrange, print statements); params and
# PARAMS_TLR are module-level arrays defined elsewhere in the script.
N_CYCLE_FOLD_ANGLE = 10
for j in xrange(N_CYCLE_FOLD_ANGLE):
    # recompute the mean each pass: folding shifts values, so the mean moves
    mean = np.mean(params, axis=0)
    for i in xrange(3, 6):  # index 3,4,5 are angles, others are distances
        # wrap samples into the 2*pi window centered on the current mean
        params[:, i][params[:, i] > mean[i] + np.pi] -= 2 * np.pi
        params[:, i][params[:, i] < mean[i] - np.pi] += 2 * np.pi
        # fold the reference conformation into the same window
        if PARAMS_TLR[i] > mean[i] + np.pi:
            PARAMS_TLR[i] += 2 * np.pi
        if PARAMS_TLR[i] < mean[i] - np.pi:
            PARAMS_TLR[i] -= 2 * np.pi
# positional args: store_precision=True, assume_centered=False
# (sklearn's documented parameter order) -- keyword form would be clearer
est = EmpiricalCovariance(True, False)
est.fit(params)
# log-likelihood of the reference (TLR) conformation under the fitted
# Gaussian; PARAMS_TLR[None, :] adds the sample axis score() expects
log_likelyhood = est.score(PARAMS_TLR[None, :])
KT = 0.59  # presumably kT in kcal/mol at room temperature -- confirm units
free_e = -log_likelyhood * KT
print 'Log likelyhood score:', log_likelyhood
print 'Free energy:', free_e
###### Output the best conformer to pdb ######
def generate_bp_par_file(params, bps, out_name):
    """Write base-pair parameters to a parameter file.

    NOTE(review): the body shown here appears truncated by the example
    scraper -- it only validates input and converts angles; the part that
    writes ``out_name`` is not visible.
    """
    # one parameter row per base pair
    assert(len(params) == len(bps))
    n_bp = len(params)
    # convert from radians to degrees
    params[:, 3:] = np.degrees(params[:, 3:])
示例12: fit
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
def fit(self, X, n_jobs=-1):
    """Fit the covariance model, then (unless disabled via ``self.no_fit``)
    calibrate the outlier-detection threshold on the same data.

    Returns self to allow chaining.
    """
    # delegate the covariance estimation to the parent class
    EmpiricalCovariance.fit(self, X)
    if not self.no_fit:
        # set the decision threshold from the training data
        CovarianceOutlierDetectionMixin.set_threshold(self, X,
                                                      n_jobs=n_jobs)
    return self
示例13: ECDF
# 需要导入模块: from sklearn.covariance import EmpiricalCovariance [as 别名]
# 或者: from sklearn.covariance.EmpiricalCovariance import fit [as 别名]
# save for heuristic correction
age = df_test['var15']
# map the age feature through its training-set empirical CDF
age_ecdf = ECDF(df_train['var15'])
df_train['var15'] = age_ecdf(df_train['var15'])
df_test['var15'] = age_ecdf(df_test['var15'])

# feature engineering: replace the -999999 sentinel in var3 and count
# the zero-valued entries per row
df_train.loc[df_train['var3'] == -999999.000000, 'var3'] = 2.0
df_train['num_zeros'] = (df_train == 0).sum(axis=1)
# BUG FIX: the mask must be computed on df_test -- the original reused
# the df_train mask here, patching the wrong rows of df_test.
df_test.loc[df_test['var3'] == -999999.000000, 'var3'] = 2.0
df_test['num_zeros'] = (df_test == 0).sum(axis=1)

# outliers: drop training rows with extreme squared Mahalanobis distance
ec = EmpiricalCovariance()
ec = ec.fit(df_train)
m2 = ec.mahalanobis(df_train)
df_train = df_train[m2 < 40000]
df_target = df_target[m2 < 40000]

# clip
# df_test = df_test.clip(df_train.min(), df_train.max(), axis=1)

# standard preprocessing: drop uninformative columns, then standardize
prep = Pipeline([
    ('cd', ColumnDropper(drop=ZERO_VARIANCE_COLUMNS + CORRELATED_COLUMNS)),
    ('std', StandardScaler())
])
X_train = prep.fit_transform(df_train)
X_test = prep.transform(df_test)