

Python covariance.empirical_covariance Function Code Examples

This article collects typical usage examples of the Python function sklearn.covariance.empirical_covariance. If you are unsure what empirical_covariance does, how to call it, or what real-world code using it looks like, the selected examples below should help.


In total, 15 code examples of the empirical_covariance function are shown below, sorted by popularity by default.
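Before the collected examples, a minimal self-contained sketch may help show what empirical_covariance actually computes: the maximum-likelihood (divide-by-n) estimate of a data matrix's covariance. The toy data below is invented purely for illustration and assumes NumPy and scikit-learn are installed.

import numpy as np
from sklearn.covariance import empirical_covariance

# Hypothetical toy data: 100 samples of 3 correlated features.
rng = np.random.RandomState(0)
X = rng.randn(100, 3) @ np.array([[1.0, 0.5, 0.0],
                                  [0.0, 1.0, 0.3],
                                  [0.0, 0.0, 1.0]])

# Maximum-likelihood covariance estimate, shape (3, 3).
emp_cov = empirical_covariance(X, assume_centered=False)
print(emp_cov)

# It coincides with NumPy's biased (divide-by-n) sample covariance.
print(np.allclose(emp_cov, np.cov(X, rowvar=False, bias=True)))  # True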

Example 1: test_covariance

def test_covariance():
    """Tests Covariance module on a simple dataset.

    """
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), squared=False), 0)
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    assert(np.amax(mahal_dist) < 250)
    assert(np.amin(mahal_dist) > 50)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)

    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
Developer: forkloop, Project: scikit-learn, Lines: 35, Source: test_covariance.py

Example 2: test_shrunk_covariance

def test_shrunk_covariance():
    """Tests ShrunkCovariance module on a simple dataset.

    """
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)

    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)

    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert(cov.precision_ is None)
Developer: forkloop, Project: scikit-learn, Lines: 32, Source: test_covariance.py
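For reference, the relationship that Example 2 exercises can be checked directly: sklearn's shrunk_covariance blends the empirical covariance with a scaled identity, shrunk = (1 - shrinkage) * emp_cov + shrinkage * mu * I, where mu = trace(emp_cov) / n_features is the average variance. The short sketch below uses made-up random data purely to illustrate that identity.

import numpy as np
from sklearn.covariance import empirical_covariance, shrunk_covariance

rng = np.random.RandomState(0)
X = rng.randn(50, 4)

emp_cov = empirical_covariance(X)
shrinkage = 0.5
n_features = emp_cov.shape[0]
mu = np.trace(emp_cov) / n_features                # average variance
manual = (1 - shrinkage) * emp_cov + shrinkage * mu * np.eye(n_features)

print(np.allclose(manual, shrunk_covariance(emp_cov, shrinkage=shrinkage)))  # True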

Example 3: _nonrobust_covariance

    def _nonrobust_covariance(self, data, assume_centered=False):
        """Non-robust estimation of the covariance to be used within MCD.

        Parameters
        ----------
        data: array_like, shape (n_samples, n_features)
          Data for which to compute the non-robust covariance matrix.
        assume_centered: Boolean
          Whether or not the observations should be considered as centered.

        Returns
        -------
        nonrobust_covariance: array_like, shape (n_features, n_features)
          The non-robust covariance of the data.

        """
        try:
            cov, prec = graph_lasso(
                empirical_covariance(data, assume_centered=assume_centered),
                self.shrinkage)
        except Exception:
            # graph_lasso can fail on ill-conditioned input; add a small
            # ridge to the diagonal and retry
            print(" > Exception!")
            emp_cov = empirical_covariance(
                data, assume_centered=assume_centered)
            emp_cov.flat[::data.shape[1] + 1] += 1e-06
            cov, prec = graph_lasso(emp_cov, self.shrinkage)
        return cov
Developer: VirgileFritsch, Project: outliers, Lines: 27, Source: rmcd.py

Example 4: test_covariance

def test_covariance():
    """Tests Covariance module on a simple dataset.

    """
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    print(np.amin(mahal_dist), np.amax(mahal_dist))
    assert(np.amin(mahal_dist) > 0)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)

    # test with one sample
    # FIXME I don't know what this test does
    X_1sample = np.arange(5)
    cov = EmpiricalCovariance()
    assert_warns(UserWarning, cov.fit, X_1sample)
    assert_array_almost_equal(cov.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)

    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
Developer: HapeMask, Project: scikit-learn, Lines: 51, Source: test_covariance.py

Example 5: test_graphical_lasso_iris_singular

def test_graphical_lasso_iris_singular():
    # Small subset of rows to test the rank-deficient case
    # Need to choose samples such that none of the variances are zero
    indices = np.arange(10, 13)

    # Hard-coded solution from R glasso package for alpha=0.01
    cov_R = np.array([
        [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
        [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
        [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
        [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
    ])
    icov_R = np.array([
        [24.42244057, -16.831679593, 0.0, 0.0],
        [-16.83168201, 24.351841681, -6.206896552, -12.5],
        [0.0, -6.206896171, 153.103448276, 0.0],
        [0.0, -12.499999143, 0.0, 462.5]
    ])
    X = datasets.load_iris().data[indices, :]
    emp_cov = empirical_covariance(X)
    for method in ('cd', 'lars'):
        cov, icov = graphical_lasso(emp_cov, alpha=0.01, return_costs=False,
                                    mode=method)
        assert_array_almost_equal(cov, cov_R, decimal=5)
        assert_array_almost_equal(icov, icov_R, decimal=5)
Developer: MartinThoma, Project: scikit-learn, Lines: 25, Source: test_graphical_lasso.py

Example 6: test_graph_lasso

def test_graph_lasso(random_state=0):
    # Sample data from a sparse multivariate normal
    dim = 20
    n_samples = 100
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.95,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    emp_cov = empirical_covariance(X)

    for alpha in (.1, .01):
        covs = dict()
        for method in ('cd', 'lars'):
            # use the current alpha and solver method from the enclosing loops
            cov_, _, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
                                         return_costs=True)
            covs[method] = cov_
            costs, dual_gap = np.array(costs).T
            # Check that the costs always decrease
            assert_array_less(np.diff(costs), 0)
        # Check that the 2 approaches give similar results
        assert_array_almost_equal(covs['cd'], covs['lars'])

    # Smoke test the estimator
    model = GraphLasso(alpha=.1).fit(X)
    assert_array_almost_equal(model.covariance_, covs['cd'])
Developer: Jetafull, Project: scikit-learn, Lines: 25, Source: test_graph_lasso.py

Example 7: test_oas

def test_oas():
    """Tests OAS module on a simple dataset.

    """
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X, assume_centered=True)
    assert_almost_equal(oa.shrinkage_, 0.018740, 4)
    assert_almost_equal(oa.score(X, assume_centered=True), -5.03605, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X, assume_centered=True)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)

    # test with n_features = 1
    oa = OAS()
    oa.fit(X_1d, assume_centered=True)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X, assume_centered=True)
    assert_almost_equal(oa.score(X, assume_centered=True), -5.03605, 4)
    assert(oa.precision_ is None)

    ### Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    assert_almost_equal(oa.shrinkage_, 0.020236, 4)
    assert_almost_equal(oa.score(X), 2.079025, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)

    # test with n_features = 1
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), 2.079025, 4)
    assert(oa.precision_ is None)
Developer: bvtrach, Project: scikit-learn, Lines: 60, Source: test_covariance.py

Example 8: launch_mcd_on_dataset

def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):

    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False

    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
Developer: JeongSeonGyo, Project: EnergyData, Lines: 26, Source: test_robust_covariance.py

Example 9: get_cov

def get_cov(data):
    dat = data.training_data_all_ways + data.testing_data_all_ways
    num_ways = len(data.get_list_of_ways())
    m = {}
    i = 0
    for way in data.get_list_of_ways():
        m[way] = i
        i += 1
    mat = np.zeros((num_ways,num_ways))
    for elem in dat:
        ways = elem[1]
        for way in ways:
            mat[m[way],m[way]] = mat[m[way],m[way]] + 1
        for w1 in ways:
            for w2 in ways:
                if w1 == w2: continue
                mat[m[w1],m[w2]] = mat[m[w1],m[w2]] + 1
    print(mat)
    emp_cov = empirical_covariance(mat)
    print(emp_cov)
    # normalize the covariance into a correlation matrix
    corr = np.zeros((num_ways, num_ways))
    for i in range(num_ways):
        for j in range(num_ways):
            corr[i, j] = emp_cov[i, j] / (math.sqrt(emp_cov[i, i]) * math.sqrt(emp_cov[j, j]))
    print(corr)
    sns.heatmap(corr, vmin=-1, vmax=1, square=True,
                xticklabels=m.keys(), yticklabels=m.keys())
    sns.plt.title("Covariance of WAYS frequencies")  # sns.plt exists only in older seaborn releases
    sns.plt.show()
Developer: casawa, Project: multiclass-multilabel-course-labeling, Lines: 28, Source: cov.py

Example 10: objective_function

    def objective_function(self, data, location, covariance):
        """Objective function minimized at each step of the MCD algorithm.
        """
        precision = pinvh(covariance)
        det = fast_logdet(precision)
        trace = np.trace(
            np.dot(empirical_covariance(data - location, assume_centered=True),
                   precision))
        pen = self.shrinkage * np.trace(precision)
        return -det + trace + pen
Developer: VirgileFritsch, Project: outliers, Lines: 10, Source: rmcd.py

Example 11: test_empirical_covariance

    def test_empirical_covariance(self):
        iris = datasets.load_iris()
        df = pdml.ModelFrame(iris)

        result = df.covariance.empirical_covariance()
        expected = covariance.empirical_covariance(iris.data)
        self.assertTrue(isinstance(result, pdml.ModelFrame))
        self.assert_index_equal(result.index, df.data.columns)
        self.assert_index_equal(result.columns, df.data.columns)
        self.assert_numpy_array_almost_equal(result.values, expected)
Developer: Sandy4321, Project: pandas-ml, Lines: 10, Source: test_covariance.py

Example 12: covariances

def covariances():
    subject_to_means, subject_to_values = load_data(TRAINING_DATA_FILENAME, True)
    subject_to_covariance = {}
    full_matrix = None
    for key in subject_to_values.keys():
        if full_matrix is None:
            full_matrix = subject_to_values[key]
            subject_to_covariance[key] = empirical_covariance(subject_to_values[key])
            print(subject_to_means[key])
            print(subject_to_covariance[key])
        else:
            full_matrix = np.append(full_matrix, subject_to_values[key], axis=0)
            subject_to_covariance[key] = empirical_covariance(subject_to_values[key])

    full_mean = full_matrix.mean(axis=0)
    full_covariance = empirical_covariance(full_matrix)
    print(full_mean)
    print(full_covariance)
    return subject_to_covariance, full_covariance, full_mean
Developer: ankurbpn, Project: CS-259D-HW, Lines: 19, Source: question3.py

Example 13: test_covariance

def test_covariance():
    """Tests Covariance module on a simple dataset.

    """
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(cov.error_norm(emp_cov, norm="spectral"), 0)
    assert_almost_equal(cov.error_norm(emp_cov, norm="frobenius"), 0)
    assert_almost_equal(cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(cov.error_norm(emp_cov, squared=False), 0)
    assert_raises(NotImplementedError, cov.error_norm, emp_cov, norm="foo")
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    print(np.amin(mahal_dist), np.amax(mahal_dist))
    assert np.amin(mahal_dist) > 0

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d), norm="spectral"), 0)

    # test with one sample
    X_1sample = np.arange(5)
    cov = EmpiricalCovariance()
    with warnings.catch_warnings(record=True):
        cov.fit(X_1sample)

    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)

    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
Developer: mugiro, Project: elm-python, Lines: 43, Source: test_covariance.py

Example 14: empirical_covariances

def empirical_covariances(subjects, assume_centered=False, standardize=False):
    """Compute empirical covariances for several signals.

    Parameters
    ----------
    subjects : list of numpy.ndarray, shape for each (n_samples, n_features)
        input subjects. Each subject is a 2D array, whose columns contain
        signals. Sample number can vary from subject to subject, but all
        subjects must have the same number of features (i.e. of columns).

    assume_centered : bool, optional
        if True, assume that all input signals are centered. This slightly
        decreases computation time by avoiding useless computation.

    standardize : bool, optional
        if True, set every signal variance to one before computing their
        covariance matrix (i.e. compute a correlation matrix).

    Returns
    -------
    emp_covs : numpy.ndarray, shape : (feature number, feature number, subject number)
        empirical covariances.

    n_samples : numpy.ndarray, shape: (subject number,)
        number of samples for each subject. dtype is float.
    """
    if not hasattr(subjects, "__iter__"):
        raise ValueError("'subjects' input argument must be an iterable. "
                         "You provided {0}".format(subjects.__class__))

    n_subjects = [s.shape[1] for s in subjects]
    if len(set(n_subjects)) > 1:
        raise ValueError("All subjects must have the same number of "
                         "features.\nYou provided: {0}".format(str(n_subjects))
                         )
    n_subjects = len(subjects)
    n_features = subjects[0].shape[1]

    # Enable to change dtype here because depending on user, conversion from
    # single precision to double will be required or not.
    emp_covs = np.empty((n_features, n_features, n_subjects), order="F")
    for k, s in enumerate(subjects):
        if standardize:
            s = s / s.std(axis=0)  # copy on purpose
        M = empirical_covariance(s, assume_centered=assume_centered)

        # Force matrix symmetry, for numerical stability
        # of _group_sparse_covariance
        emp_covs[..., k] = M + M.T
    emp_covs /= 2

    n_samples = np.asarray([s.shape[0] for s in subjects], dtype=float)

    return emp_covs, n_samples
Developer: demianw, Project: nilearn, Lines: 54, Source: group_sparse_covariance.py
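A brief usage sketch for the empirical_covariances helper above; the two random "subject" arrays are invented for illustration, and the function is assumed to be in scope (defined as in Example 14).

import numpy as np

rng = np.random.RandomState(0)
subjects = [rng.randn(40, 5), rng.randn(60, 5)]    # two subjects, same 5 features

emp_covs, n_samples = empirical_covariances(subjects, standardize=True)
print(emp_covs.shape)   # (5, 5, 2): one covariance matrix per subject
print(n_samples)        # [40. 60.]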

Example 15: feat_select

def feat_select(f):

    cp = load(read)
    (X, y, t) = cp.export_data(f)

    data = numpy.c_[X, y]
    cov = empirical_covariance(data, False)

    print(cov)

    # print the covariance of each feature with the target column y
    for i in range(cov.shape[0] - 1):
        print(cov[i, -1])
Developer: mtthss, Project: experiments-in-summarization, Lines: 12, Source: test.py


Note: The sklearn.covariance.empirical_covariance examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.