

Python decomposition.MiniBatchDictionaryLearning code examples

This article collects typical usage examples of Python's sklearn.decomposition.MiniBatchDictionaryLearning class found in open-source projects. If you are wondering what decomposition.MiniBatchDictionaryLearning does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore further usage examples from the sklearn.decomposition module.


The following presents 13 code examples of decomposition.MiniBatchDictionaryLearning, sorted by popularity by default.
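Before the project-specific examples, here is a minimal, self-contained usage sketch. It is not taken from any of the projects below; the synthetic data and parameter values are illustrative assumptions, and it uses the older n_iter argument for consistency with the examples on this page (newer scikit-learn releases use max_iter instead).

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

# Synthetic data: 100 samples with 20 features each (illustrative only).
rng = np.random.RandomState(0)
X_demo = rng.randn(100, 20)

# Learn 8 dictionary atoms with mini-batch updates and encode the data.
dico = MiniBatchDictionaryLearning(n_components=8, alpha=1, n_iter=50,
                                   batch_size=5, random_state=0)
code = dico.fit_transform(X_demo)  # sparse codes, shape (100, 8)
print(dico.components_.shape)      # learned dictionary, shape (8, 20)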

Example 1: test_dict_learning_online_verbosity

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_verbosity():
    n_components = 5
    # test verbosity
    from io import StringIO
    import sys

    old_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
                                           random_state=0)
        dico.fit(X)
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
                                           random_state=0)
        dico.fit(X)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
                             random_state=0)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
                             random_state=0)
    finally:
        sys.stdout = old_stdout

    assert dico.components_.shape == (n_components, n_features) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 25, Source: test_dict_learning.py

Example 2: test_dict_learning_online_partial_fit

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_partial_fit():
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
                                        batch_size=1,
                                        alpha=1, shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
                                        n_iter=1, dict_init=V,
                                        random_state=0)
    for i in range(10):
        for sample in X:
            dict2.partial_fit(sample[np.newaxis, :])

    assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)
    assert_array_almost_equal(dict1.components_, dict2.components_,
                              decimal=2) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 21, Source: test_dict_learning.py

Example 3: learn_dictionary_mini

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def learn_dictionary_mini(patches, n_c=512, a=1, n_i=800, n_j=3, b_s=3, es=5, fit_algorithm='lars'):
    """
    patches - patches to learn on (should be normalized beforehand)
    n_c - number of components (atoms), e.g. 512
    a   - alpha, the sparsity-controlling parameter
    n_i - total number of iterations to perform
    n_j - number of parallel jobs to run (number of threads)
    b_s - batch size: number of samples in each mini-batch
    es  - edge size of each (cubic) element in the dictionary
    fit_algorithm - {'lars', 'cd'}
    """
    dic = MiniBatchDictionaryLearning(n_components=n_c, alpha=a, n_iter=n_i,
                                      n_jobs=n_j, batch_size=b_s, fit_algorithm=fit_algorithm)
    print("Start learning dictionary_mini: n_c: " + str(n_c) + ", alpha: " + str(a) + ", n_i: " +
          str(n_i) + ", n_j: " + str(n_j) + ", es: " + str(es) + ", b_s: " + str(b_s))
    v1 = dic.fit(patches).components_
    d1 = v1.reshape(n_c, es, es, es)  # e.g. 512x5x5x5
    return d1
Developer: konopczynski, Project: Vessel3DDL, Lines: 20, Source: LearnDictionary.py
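A hypothetical call to learn_dictionary_mini as defined above, shown only to illustrate the expected input; the random patches and the parameter values are assumptions, not taken from the Vessel3DDL project.

import numpy as np

# Hypothetical input: 1000 normalized 5x5x5 patches, flattened to rows of length 125.
patches = np.random.randn(1000, 5 * 5 * 5)
atoms = learn_dictionary_mini(patches, n_c=32, a=1, n_i=100, n_j=1, b_s=10, es=5)
print(atoms.shape)  # (32, 5, 5, 5)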

Example 4: test_objectmapper

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_objectmapper(self):
        df = pdml.ModelFrame([])
        self.assertIs(df.decomposition.PCA, decomposition.PCA)
        self.assertIs(df.decomposition.IncrementalPCA,
                      decomposition.IncrementalPCA)
        self.assertIs(df.decomposition.KernelPCA, decomposition.KernelPCA)
        self.assertIs(df.decomposition.FactorAnalysis,
                      decomposition.FactorAnalysis)
        self.assertIs(df.decomposition.FastICA, decomposition.FastICA)
        self.assertIs(df.decomposition.TruncatedSVD, decomposition.TruncatedSVD)
        self.assertIs(df.decomposition.NMF, decomposition.NMF)
        self.assertIs(df.decomposition.SparsePCA, decomposition.SparsePCA)
        self.assertIs(df.decomposition.MiniBatchSparsePCA,
                      decomposition.MiniBatchSparsePCA)
        self.assertIs(df.decomposition.SparseCoder, decomposition.SparseCoder)
        self.assertIs(df.decomposition.DictionaryLearning,
                      decomposition.DictionaryLearning)
        self.assertIs(df.decomposition.MiniBatchDictionaryLearning,
                      decomposition.MiniBatchDictionaryLearning)

        self.assertIs(df.decomposition.LatentDirichletAllocation,
                      decomposition.LatentDirichletAllocation) 
Developer: pandas-ml, Project: pandas-ml, Lines: 24, Source: test_decomposition.py

Example 5: test_dict_learning_online_verbosity

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_verbosity():
    n_components = 5
    # test verbosity
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys

    old_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
                                           random_state=0)
        dico.fit(X)
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
                                           random_state=0)
        dico.fit(X)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
                             random_state=0)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
                             random_state=0)
    finally:
        sys.stdout = old_stdout

    assert_true(dico.components_.shape == (n_components, n_features)) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 25, Source: test_dict_learning.py

Example 6: test_dict_learning_online_partial_fit

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_partial_fit():
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
                                        batch_size=1,
                                        alpha=1, shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
                                        n_iter=1, dict_init=V,
                                        random_state=0)
    for i in range(10):
        for sample in X:
            dict2.partial_fit(sample[np.newaxis, :])

    assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
                           0))
    assert_array_almost_equal(dict1.components_, dict2.components_,
                              decimal=2) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 22, Source: test_dict_learning.py

Example 7: test_dict_learning_online_positivity

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_positivity(transform_algorithm,
                                         positive_code,
                                         positive_dict):
    rng = np.random.RandomState(0)
    n_components = 8

    dico = MiniBatchDictionaryLearning(
        n_components, transform_algorithm=transform_algorithm, random_state=0,
        positive_code=positive_code, positive_dict=positive_dict).fit(X)
    code = dico.transform(X)
    if positive_dict:
        assert (dico.components_ >= 0).all()
    else:
        assert (dico.components_ < 0).any()
    if positive_code:
        assert (code >= 0).all()
    else:
        assert (code < 0).any()

    code, dictionary = dict_learning_online(X, n_components=n_components,
                                            alpha=1, random_state=rng,
                                            positive_dict=positive_dict,
                                            positive_code=positive_code)
    if positive_dict:
        assert (dictionary >= 0).all()
    else:
        assert (dictionary < 0).any()
    if positive_code:
        assert (code >= 0).all()
    else:
        assert (code < 0).any() 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 33, Source: test_dict_learning.py

Example 8: test_dict_learning_online_estimator_shapes

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_estimator_shapes():
    n_components = 5
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
    dico.fit(X)
    assert dico.components_.shape == (n_components, n_features) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 7, Source: test_dict_learning.py

Example 9: test_dict_learning_online_overcomplete

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_overcomplete():
    n_components = 12
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
                                       random_state=0).fit(X)
    assert dico.components_.shape == (n_components, n_features) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 7, Source: test_dict_learning.py

Example 10: test_dict_learning_online_initialization

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_initialization():
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)
    dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
                                       dict_init=V, random_state=0).fit(X)
    assert_array_equal(dico.components_, V) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 9, Source: test_dict_learning.py

Example 11: learn_dictionary

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def learn_dictionary(X, n_filters, filter_size, n_sample=1000,
                     n_sample_patches=0, **kwargs):
    """
    learn a dictionary of n_filters atoms from n_sample images from X
    """

    n_channels = X.shape[1]

    # subsample n_sample images randomly
    rand_idx = np.random.choice(len(X), n_sample, replace=False)

    # extract patches
    patch_size = (filter_size, filter_size)
    patches = PatchExtractor(patch_size).transform(
        X[rand_idx, ...].reshape(n_sample, X.shape[2], X.shape[3], X.shape[1]))
    patches = patches.reshape(patches.shape[0], -1)
    patches -= np.mean(patches, axis=0)
    patches /= np.std(patches, axis=0)

    if n_sample_patches > 0 and (n_sample_patches < len(patches)):
        np.random.shuffle(patches)
        patches = patches[:n_sample_patches, ...]

    # learn dictionary
    print('Learning dictionary for weight initialization...')

    dico = MiniBatchDictionaryLearning(n_components=n_filters, alpha=1, n_iter=1000, batch_size=10, shuffle=True,
                                       verbose=True, **kwargs)
    W = dico.fit(patches).components_
    W = W.reshape(n_filters, n_channels, filter_size, filter_size)

    print('Dictionary learned.')

    return W.astype(np.float32) 
Developer: lukasruff, Project: Deep-SVDD, Lines: 36, Source: preprocessing.py
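A hypothetical invocation of learn_dictionary as defined above, included only to show the expected (n_images, n_channels, height, width) input layout; the array sizes and filter settings are assumptions rather than values from the Deep-SVDD project.

import numpy as np

# Hypothetical input: 50 RGB images of size 32x32 in (N, C, H, W) layout.
X_imgs = np.random.randn(50, 3, 32, 32).astype(np.float32)
W_init = learn_dictionary(X_imgs, n_filters=16, filter_size=5, n_sample=50)
print(W_init.shape)  # (16, 3, 5, 5)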

Example 12: test_dict_learning_online_estimator_shapes

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_estimator_shapes():
    n_components = 5
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
    dico.fit(X)
    assert_true(dico.components_.shape == (n_components, n_features)) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 7, Source: test_dict_learning.py

Example 13: test_dict_learning_online_overcomplete

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
def test_dict_learning_online_overcomplete():
    n_components = 12
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
                                       random_state=0).fit(X)
    assert_true(dico.components_.shape == (n_components, n_features)) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 7, Source: test_dict_learning.py


Note: The sklearn.decomposition.MiniBatchDictionaryLearning examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on Github/MSDocs and similar platforms. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not repost without permission.