

Python MiniBatchDictionaryLearning.fit Method Code Examples

This article collects typical usage examples of the Python method sklearn.decomposition.MiniBatchDictionaryLearning.fit. If you are wondering what MiniBatchDictionaryLearning.fit does and how to use it, the curated code examples below should help. You can also explore further usage examples of the enclosing class, sklearn.decomposition.MiniBatchDictionaryLearning.


Below are 15 code examples of MiniBatchDictionaryLearning.fit, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
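
Before the examples, here is a minimal sketch of the pattern they all share: construct the estimator, call fit on a 2-D array of samples, and read the learned atoms from components_. This assumes the older scikit-learn API used throughout these snippets, where the iteration count is passed as n_iter (renamed max_iter in newer releases):

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

rng = np.random.RandomState(0)
X = rng.randn(200, 64)  # 200 samples with 64 features each

dico = MiniBatchDictionaryLearning(n_components=10, alpha=1, n_iter=100)
dico.fit(X)                    # fit returns the estimator itself
print(dico.components_.shape)  # (10, 64): one dictionary atom per row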

Example 1: dictionay_learning_MHOF_online

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def dictionay_learning_MHOF_online(training_samples_num=400):
    from MHOF_Extraction import MHOF_Extraction
    from MHOF_histogram_block import MHOF_histogram_block
    from sklearn.decomposition import MiniBatchDictionaryLearning
    import numpy as np
    import cv2
    import video
    cam = video.create_capture('Crowd-Activity-All.avi')
    height_block_num = 4
    width_block_num = 5
    bin_num = 16
    ret, prev = cam.read()
    ret, img = cam.read()
    flow_H = MHOF_Extraction(prev, img)
    flow_hist_H = MHOF_histogram_block(flow_H, height_block_num, width_block_num, bin_num)
    flow_hist_H = np.reshape(flow_hist_H, [1, flow_hist_H.size])
    dico = MiniBatchDictionaryLearning(1, alpha=1, n_iter=500)
    dic = dico.fit(flow_hist_H).components_
    for i in range(training_samples_num):
        prev = img  # advance the frame pair before computing the next flow
        ret, img = cam.read()
        flow_H = MHOF_Extraction(prev, img)
        flow_hist_H = MHOF_histogram_block(flow_H, height_block_num, width_block_num, bin_num)
        flow_hist_H = np.reshape(flow_hist_H, [1, flow_hist_H.size])
        # warm-start each refit from the previous dictionary estimate
        dico = MiniBatchDictionaryLearning(i + 1, alpha=1, n_iter=500, dict_init=dic)
        dic = dico.fit(flow_hist_H).components_
    return dic

        
Developer: burness, Project: MHOF, Lines: 28, Source: dictionary_learning_MHOF_online.py
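
The dict_init warm-start idiom above (also used in Example 8) is worth isolating. A minimal sketch with synthetic data, not the author's MHOF pipeline: each refit passes the previous dictionary as dict_init so learning resumes rather than restarts (older scikit-learn API with n_iter assumed).

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

rng = np.random.RandomState(0)
D = None  # no initial dictionary for the first batch
for batch in np.array_split(rng.randn(300, 32), 3):
    # resume from the previous dictionary estimate
    dico = MiniBatchDictionaryLearning(n_components=8, alpha=1, n_iter=50,
                                       dict_init=D)
    D = dico.fit(batch).components_
print(D.shape)  # (8, 32)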

Example 2: sklearn_check

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def sklearn_check(img, patch_size, dic_size, T=1000):
    patch_shape = (patch_size, patch_size)
    patches = extract_patches_2d(img, patch_shape)
    patches = patches.reshape(patches.shape[0], -1)
    patches = center(patches)
    dl = MiniBatchDictionaryLearning(dic_size, n_iter=T)
    dl.fit(patches)
    D = dl.components_.T
    return D
Developer: Mandrathax, Project: sparse_coding, Lines: 11, Source: optim.py

Example 3: to_sparse

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def to_sparse(X,dim):

	sparse_dict = MiniBatchDictionaryLearning(dim)
	sparse_dict.fit(X)
	sparse_vectors = sparse_encode(X, sparse_dict.components_)

	for i in sparse_vectors:
		print i

	return sparse_vectors
Developer: tarekmehrez, Project: extended-word2vec, Lines: 12, Source: sparse_and_test.py
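
Since sparse_encode solves for codes such that X ≈ codes · dictionary, the round trip is easy to verify. A minimal sketch with synthetic data (older scikit-learn API with n_iter assumed):

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning, sparse_encode

rng = np.random.RandomState(0)
X = rng.randn(50, 16)
dico = MiniBatchDictionaryLearning(n_components=8, n_iter=100).fit(X)
codes = sparse_encode(X, dico.components_)
X_rec = np.dot(codes, dico.components_)  # reconstruction from sparse codes
print(np.linalg.norm(X - X_rec))         # residual reconstruction error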

Example 4: BOW_sparsecoding

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
class BOW_sparsecoding(BOW):

	def codebook(self):
		self.mbdl =  MiniBatchDictionaryLearning(self.N_codebook)
		self.mbdl.fit(self.raw_features)
		

	def bow_feature_extract(self, path):
		des = self.raw_feature_extract(path)
		out = sum(sparse_encode(des, self.mbdl.components_))
		out = np.array([out])
		return out
Developer: andreydung, Project: bagofwords, Lines: 14, Source: bow.py

Example 5: test_dict_learning_online_verbosity

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def test_dict_learning_online_verbosity():
    n_components = 5
    # test verbosity
    from cStringIO import StringIO
    import sys
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1)
    dico.fit(X)
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2)
    dico.fit(X)
    dict_learning_online(X, n_components=n_components, alpha=1, verbose=1)
    dict_learning_online(X, n_components=n_components, alpha=1, verbose=2)
    sys.stdout = old_stdout
    assert_true(dico.components_.shape == (n_components, n_features))
Developer: Jetafull, Project: scikit-learn, Lines: 17, Source: test_dict_learning.py

Example 6: buildmodel2

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def buildmodel2():
    "生成有眼镜-无眼镜pair模型"
    modelrec = np.load('cut_rec.npy')
    modelglass = np.load('glassline.npy')[:modelrec.shape[0]]

    linkedmodel = np.empty((modelrec.shape[0],modelrec.shape[1]+modelglass.shape[1]),'f')
    linkedmodel[:,:modelrec.shape[1]]=modelrec
    linkedmodel[:,modelrec.shape[1]:]=modelglass

    #Train
    from sklearn.decomposition import MiniBatchDictionaryLearning
    learning = MiniBatchDictionaryLearning(500,verbose=True)
    learning.fit(linkedmodel)
    import cPickle
    cPickle.dump(learning,file('sparselinked','wb'),-1)
Developer: wzc11, Project: glasses-removal, Lines: 17, Source: model2.py

Example 7: main

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def main(games_path = None):
    
    if games_path is None:
        games_path = 'specmine/data/go_games/2010-01.pickle.gz'

    with specmine.util.openz(games_path) as games_file:
        games = pickle.load(games_file)

    boards = None # numpy array nx9x9 
    for game in games:
        if boards is None:  # '== None' is broken for numpy arrays
            boards = games[game].grids
        else:
            boards = numpy.vstack((boards,games[game].grids))

    print 'boards shape: ', boards.shape

    boards = boards.reshape((boards.shape[0],-1))

    print 'boards reshaped: ', boards.shape

    print 'Learning the dictionary... '
    t0 = time()
    dico = MiniBatchDictionaryLearning(n_atoms=100, alpha=1, n_iter=500)
    V = dico.fit(boards).components_
    dt = time() - t0
    print 'done in %.2fs.' % dt

    #pl.figure(figsize=(4.2, 4))
    for i, comp in enumerate(V[:100]):
        pl.subplot(10, 10, i + 1)
        pl.imshow(comp.reshape(9, 9), cmap=pl.cm.gray_r)  # atoms are flattened 9x9 boards
        pl.xticks(())
        pl.yticks(())
Developer: bsilverthorn, Project: spectral-mining, Lines: 36, Source: sparse_coding.py

Example 8: scskl_dico_learning

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def scskl_dico_learning(list_pickled_array,n_atoms,maxepoch=5,maxiter=100):
  D = None
  for e in range(maxepoch):
    for a in list_pickled_array:
      data = joblib.load(a)
      dico = MiniBatchDictionaryLearning(n_components=n_atoms, n_iter=maxiter, dict_init=D)
      D = dico.fit(data).components_.astype(np.float32)
  return D      
Developer: rousseau, Project: fbrain, Lines: 10, Source: sparsity.py

Example 9: test_dict_learning_online_verbosity

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def test_dict_learning_online_verbosity():
    n_components = 5
    # test verbosity
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
                                       random_state=0)
    dico.fit(X)
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
                                       random_state=0)
    dico.fit(X)
    dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
                         random_state=0)
    dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
                         random_state=0)
    sys.stdout = old_stdout
    assert_true(dico.components_.shape == (n_components, n_features))
Developer: 2011200799, Project: scikit-learn, Lines: 21, Source: test_dict_learning.py

Example 10: create_dictionaries

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def create_dictionaries(n_codewords=20):
	dataset_features = np.load('MSR_Features_hog-hof-skel1360423760.27.dat')
	hogs = []
	hofs = []
	skels = []
	for n in dataset_features.keys():
		hogs +=	dataset_features[n]['hog']
		hofs +=	dataset_features[n]['hof']
		skels += [normalize_skeleton(dataset_features[n]['skel_world'])]

	''' Input should be features[n_samples, n_features] '''
	hogs = np.vstack(hogs)
	hofs = np.vstack(hofs)
	skels = np.vstack(skels)

	hog_dict = MiniBatchDictionaryLearning(n_codewords, n_jobs=-1, verbose=True, transform_algorithm='lasso_lars')
	hog_dict.fit(hogs)
	hof_dict = MiniBatchDictionaryLearning(n_codewords, n_jobs=-1, verbose=True, transform_algorithm='lasso_lars')
	hof_dict.fit(hofs)
	skels_dict = MiniBatchDictionaryLearning(n_codewords, n_jobs=-1, verbose=True, transform_algorithm='lasso_lars')
	skels_dict.fit(skels)

	feature_dictionaries = {'hog':hog_dict, 'hof':hof_dict, 'skel':skels_dict}

	with open('MSR_Dictionaries_hog-hof-skel_%f.dat'%time.time(), 'wb') as outfile:
	    pickle.dump(feature_dictionaries, outfile, protocol=pickle.HIGHEST_PROTOCOL)
Developer: MerDane, Project: pyKinectTools, Lines: 28, Source: main_MSR-Dataset.py
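
Once fitted, each dictionary encodes new descriptors through transform, using the transform_algorithm fixed at construction. A hedged follow-up sketch; hog_new is a hypothetical array of unseen HOG descriptors with the same feature width as hogs:

# codes has shape (n_descriptors, n_codewords); each row holds the sparse
# codeword activations of one descriptor (hog_new is hypothetical)
codes = hog_dict.transform(hog_new)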

Example 11: test_dict_learning_online_verbosity

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def test_dict_learning_online_verbosity():
    n_components = 5
    # test verbosity
    from io import StringIO
    import sys

    old_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
                                           random_state=0)
        dico.fit(X)
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
                                           random_state=0)
        dico.fit(X)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
                             random_state=0)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
                             random_state=0)
    finally:
        sys.stdout = old_stdout

    assert dico.components_.shape == (n_components, n_features)
Developer: hmshan, Project: scikit-learn, Lines: 25, Source: test_dict_learning.py

Example 12: learning_sparse_coding

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def learning_sparse_coding(X, components=None):
    """
    http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.DictionaryLearning.html
    http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.sparse_encode.html
    """
    if components is None:
        print('Learning the dictionary...')
        t0 = time()
        diclearner = MiniBatchDictionaryLearning(n_components=100, verbose=True)
        components = diclearner.fit(X).components_
        np.savetxt('components_of_convfeat.txt', components)
        dt = time() - t0
        print('done in %.2fs.' % dt)

    codes = sparse_encode(X, components)
    np.savetxt('sparse_codes_of_convfeat.txt', codes)
Developer: HunjaeJung, Project: imagenet2014-modified, Lines: 18, Source: model_sparse.py

Example 13: train_sparse_coding

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
def train_sparse_coding(feature_list, patch_list, dict_size=256, transform_alpha=0.5, n_iter=50):
    """
    Train sparse coding with mini batches.
    #feature_list: the list of feature arrays to train on
    #patch_list: the list of corresponding result patches

    :return sc_list
    """
    sc_list = []
    i = 0
    for feature, patch in zip(feature_list, patch_list):
        i = i + 1
        '''
        Because of the relative scale of the concatenated values, sparse coding
        may ignore the smaller features; the x10 below should be replaced with a
        proper feature normalization method. The more correlated the data, the
        more useful every vector is, so training takes longer.
        '''
        dico = None
        X = np.concatenate((feature, patch), axis=1)

        if len(X) > 100000:
            np.random.shuffle(X)
            X = X[:90000]

        if len(X) < 5000:
            print "进入DictionaryLearning状态"
            dico = MiniBatchDictionaryLearning(batch_size=1000, transform_algorithm='lasso_lars', fit_algorithm='lars',
                                               transform_n_nonzero_coefs=5, n_components=len(X)/50,
                                               dict_init=X[:len(X)/50],
                                               n_iter=n_iter, transform_alpha=transform_alpha, verbose=10, n_jobs=-1)
        else:
            print "进入MiniBatchDictionaryLearning状态"
            dico = MiniBatchDictionaryLearning(batch_size=1000, transform_algorithm='lasso_lars', fit_algorithm='lars',
                                               transform_n_nonzero_coefs=5, n_components=len(X)/50,
                                               dict_init=X[:len(X)/50],
                                               n_iter=n_iter, transform_alpha=transform_alpha, verbose=10, n_jobs=-1)
        V = dico.fit(X).components_
        sc_list.append(V)

        file_name = "./tmp_file/_tmp_sc_list_new_clsd_raw_%d.pickle" % (i)
        sc_file = open(file_name, 'wb')
        cPickle.dump(sc_list, sc_file, 1)
        sc_file.close()

    return sc_list
Developer: liangz0707, Project: mySuperResolution, Lines: 46, Source: training_dict.py

Example 14: Layer

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
class Layer(object):

    def __init__(self, hierarchy, depth, patch_size, num_features, num_patches, multiplier):
        """
         * depth - hierarchy level (1, 2, 3, etc.)
         * patch_size - number of pixels representing side of the square patch.
           like, 8 (8x8 patches)
         * num_features - how many components to learn
         * multiplier - num of subpatches we break patch into
           (0 for the first level). If 3, the patch will contain 3x3 subpatches.
        """
        self.hierarchy = hierarchy
        self.depth = depth
        self.basement_size = patch_size
        self.num_features = num_features
        self.num_patches = num_patches
        self.multiplier = multiplier
        self.learning = MiniBatchDictionaryLearning(
            n_components=num_features, n_iter=3000, transform_algorithm='lasso_lars', transform_alpha=0.5, n_jobs=2)
        self.ready = False

    def get_data(self, data, max_patches=None):
        """
        Extracts raw data from patches.
        """
        max_patches = max_patches or self.num_patches
        if isinstance(data, np.ndarray):
            # one image
            patches = extract_patches_2d(
                data, (self.basement_size, self.basement_size), max_patches=max_patches)
        else:
            patches = []
            # multiple images
            for i in xrange(max_patches):
                idx = np.random.randint(len(data))  # selecting random image
                dx = dy = self.basement_size
                if data[idx].shape[0] <= dx or data[idx].shape[1] <= dy:
                    continue
                x = np.random.randint(data[idx].shape[0] - dx)
                y = np.random.randint(data[idx].shape[1] - dy)
                patch = data[idx][x: x + dx, y: y + dy]
                patches.append(patch.reshape(-1))
            patches = np.vstack(patches)
            patches = patches.reshape(patches.shape[0], self.basement_size, self.basement_size)
        print 'patches', patches.shape
        patches = preprocessing.scale(patches)
        return patches

    def learn(self, data):
        data = data.reshape(data.shape[0], -1)
        self.learning.fit(data)
        self.ready = True

    @property
    def output_size(self):
        return int(np.sqrt(self.num_features))

    @property
    def input_size(self):
        if self.depth == 0:
            return self.basement_size
        else:
            prev_layer = self.hierarchy.layers[self.depth - 1]
            r = prev_layer.output_size * self.multiplier
            return r
        return self._input_size

    @property
    def features(self):
        return self.learning.components_

    # def get_features(self):
    #     # going from up to down
    #     result = []
    #     layers = self.hierarchy.layers[: self.depth][::-1]
    #     if self.depth == 0:
    #         return self.features

    #     previous_layer = self.hierarchy.layers[self.depth - 1]
    #     for feature in self.features:
    #         multiplier = self.multiplier
    #         feature = feature.reshape(self.multiplier * previous_layer.output_size,
    #                                   self.multiplier * previous_layer.output_size,)
    #         for other_layer in layers:
    #             expressed_feature = np.empty((multiplier * other_layer.input_size,
    #                                           multiplier * other_layer.input_size))
    #             enc_n = other_layer.output_size
    #             n = other_layer.input_size
    #             for dx in range(multiplier):
    #                 for dy in range(multiplier):
    #                     encoded_subfeature = feature[dx * enc_n: (dx + 1) * enc_n,
    #                                                  dy * enc_n: (dy + 1) * enc_n]
    #                     prev_patch = np.dot(encoded_subfeature.reshape(-1), other_layer.features)
    #                     expressed_feature[dx * n: (dx + 1) * n, dy * n: (dy + 1) * n] = prev_patch.reshape(n, n)
    #             feature = expressed_feature
    #             multiplier *= other_layer.multiplier
    #         result.append(expressed_feature.reshape(-1))
    #     result = np.vstack(result)
    #     return result

#.........some code omitted here.........
Developer: alh92064, Project: deep_hierarchy, Lines: 103, Source: hierarchy.py

Example 15: BoVWFeature

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import fit [as alias]
class BoVWFeature(TransformerMixin):
    """ 
    Extract BoVW Feature
        
    Parameters
    ----------
    codebook_size : int
      the size of the codebook, default: 512
    
    method : str
      the method used to compute the codebook; one of 'sc' (sparse coding) or 'km' (k-means)
      
    """
    def __init__(self, codebook_size=512, method='sc'):
        self.codebook_size = codebook_size
        self.method = method
        self.patch_num = 40000
        self.patch_size = 8
        self.sample = 'random'
        self.feature = 'raw' # raw, surf, hog

    
    def fit(self, X, y=None):
        # compute the codes
        print 'Extracting patches...'
        patchs = []
        num = self.patch_num // X.size
        for x in X:
            img = imread(str(x[0]))
            tmp = extract_patches_2d(img, (self.patch_size,self.patch_size), \
                                     max_patches=num, random_state=np.random.RandomState())
            patchs.append(tmp)
        data = np.vstack(patchs)
        data = data.reshape(data.shape[0], -1)
        
        data -= np.mean(data, axis=0)
        data = data/np.std(data, axis=0)
        
        print 'Learning codebook...'
        if self.method == 'sc':
            self.dico = MiniBatchDictionaryLearning(n_components=self.codebook_size, \
                                               alpha=1, n_iter=100, batch_size =100, verbose=True)
            self.dico.fit(data)
        elif self.method=='km':
            # self.dico = MiniBatchKMeans(n_clusters=self.codebook_size)
            pass
        
        return self
    
    def transform(self, X):
        """         
        Parameters
        ----------
        X : {array-like}, shape = [n_samples, 1]
            Training vectors, where n_samples is the number of samples and
            1 is image path.
      
        Returns
        -------

          array-like = [n_samples, features]
            BoVW feature vector of each sample.
        
        """
        print 'Extracting feature...'
        # setting the dictionary
        self.dico.set_params(transform_algorithm='lars')
        results = []
        for sample in X:
            img = imread(str(sample[0]))
            tmp = extract_patches_2d(img, (self.patch_size,self.patch_size), \
                                     max_patches=300, random_state=np.random.RandomState())
            data = tmp.reshape(tmp.shape[0], -1)
            data = data-np.mean(data, axis=0)
            data = data/np.std(data, axis=0)
            code = self.dico.transform(data)
            results.append(code.sum(axis=0))
        return np.vstack(results)
    
    def get_params(self, deep=True):
        return {"codebook_size": self.codebook_size}
Developer: AI42, Project: CNN-detection-tracking, Lines: 83, Source: BOVW.py
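
A hedged usage sketch of the transformer above; train_paths and test_paths are hypothetical [n_samples, 1] array-likes of image paths, as the docstrings require:

bovw = BoVWFeature(codebook_size=512, method='sc')
bovw.fit(train_paths)                  # learn the codebook from training images
features = bovw.transform(test_paths)  # (n_samples, codebook_size) BoVW vectors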


Note: The sklearn.decomposition.MiniBatchDictionaryLearning.fit examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright remains with the original owners; consult each project's license before distributing or using the code, and do not repost without permission.