

Python MiniBatchDictionaryLearning.transform Method Code Examples

This article collects typical usage examples of the Python method sklearn.decomposition.MiniBatchDictionaryLearning.transform. If you are wondering what MiniBatchDictionaryLearning.transform does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, sklearn.decomposition.MiniBatchDictionaryLearning.


The following sections present 11 code examples of MiniBatchDictionaryLearning.transform, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
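For orientation, here is a minimal, self-contained sketch of the typical fit/transform workflow. The data shape and the n_components and alpha values below are illustrative assumptions, not values taken from any of the examples that follow.

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

# Illustrative data: 500 samples of 64 features (e.g. flattened 8x8 patches).
rng = np.random.RandomState(0)
X = rng.randn(500, 64)

# Learn a dictionary of 32 atoms, then encode X against it.
dico = MiniBatchDictionaryLearning(n_components=32, alpha=1, random_state=0)
dico.fit(X)
code = dico.transform(X)                         # sparse codes, shape (500, 32)
reconstruction = np.dot(code, dico.components_)  # approximate X from the codes

Most of the examples below follow this pattern: fit a dictionary on image patches, call transform to obtain sparse codes, and multiply the codes by components_ to reconstruct the patches.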

Example 1: test_dict_learning_online_positivity

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
def test_dict_learning_online_positivity(transform_algorithm,
                                         positive_code,
                                         positive_dict):
    rng = np.random.RandomState(0)
    n_components = 8

    dico = MiniBatchDictionaryLearning(
        n_components, transform_algorithm=transform_algorithm, random_state=0,
        positive_code=positive_code, positive_dict=positive_dict).fit(X)
    code = dico.transform(X)
    if positive_dict:
        assert_true((dico.components_ >= 0).all())
    else:
        assert_true((dico.components_ < 0).any())
    if positive_code:
        assert_true((code >= 0).all())
    else:
        assert_true((code < 0).any())

    code, dictionary = dict_learning_online(X, n_components=n_components,
                                            alpha=1, random_state=rng,
                                            positive_dict=positive_dict,
                                            positive_code=positive_code)
    if positive_dict:
        assert_true((dictionary >= 0).all())
    else:
        assert_true((dictionary < 0).any())
    if positive_code:
        assert_true((code >= 0).all())
    else:
        assert_true((code < 0).any())
Author: MartinThoma, Project: scikit-learn, Lines: 33, Source: test_dict_learning.py

Example 2: print

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
transform_algorithms = [
    ('Orthogonal Matching Pursuit\n1 atom', 'omp',
     {'transform_n_nonzero_coefs': 1}),
    ('Orthogonal Matching Pursuit\n2 atoms', 'omp',
     {'transform_n_nonzero_coefs': 2}),
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]

reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = lena.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    code = dico.transform(data)
    patches = np.dot(code, V)

    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()

    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
        patches, (width, height // 2))
    dt = time() - t0
    print('done in %.2fs.' % dt)
Author: heidekrueger, Project: CaseStudiesMachineLearning, Lines: 33, Source: sklearn_example.py

Example 3: BoVWFeature

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
class BoVWFeature(TransformerMixin):
    """ 
    Extract BoVW Feature
        
    Parameters
    ----------
    codebook_size : int
      the size of the codebook, default: 512

    method : str
      the method used to learn the codebook: 'sc' (sparse coding) or 'km' (k-means)
      
    """
    def __init__(self, codebook_size=512, method='sc'):
        self.codebook_size = codebook_size
        self.method = method
        self.patch_num = 40000
        self.patch_size = 8
        self.sample = 'random'
        self.feature = 'raw' # raw, surf, hog

    
    def fit(self, X, y=None):
        # compute the codes
        print 'Extracting patches...'
        patches = []
        num = self.patch_num // X.size
        for x in X:
            img = imread(str(x[0]))
            tmp = extract_patches_2d(img, (self.patch_size,self.patch_size), \
                                     max_patches=num, random_state=np.random.RandomState())
            patches.append(tmp)
        data = np.vstack(patches)
        data = data.reshape(data.shape[0], -1)
        
        data -= np.mean(data, axis=0)
        data = data/np.std(data, axis=0)
        
        print 'Learning codebook...'
        if self.method == 'sc':
            self.dico = MiniBatchDictionaryLearning(n_components=self.codebook_size, \
                                               alpha=1, n_iter=100, batch_size =100, verbose=True)
            self.dico.fit(data)
        elif self.method=='km':
            # self.dico = MiniBatchKMeans(n_clusters=self.codebook_size)
            pass
        
        return self
    
    def transform(self, X):
        """         
        Parameters
        ----------
        X : {array-like}, shape = [n_samples, 1]
            Input samples, where n_samples is the number of samples and
            each entry is the path to an image.

        Returns
        -------
        array-like, shape = [n_samples, n_features]
            Bag-of-visual-words feature vector for each sample.
        
        """
        print 'Extracting feature...'
        # setting the dictionary
        self.dico.set_params(transform_algorithm='lars')
        results = []
        for sample in X:
            img = imread(str(sample[0]))
            tmp = extract_patches_2d(img, (self.patch_size,self.patch_size), \
                                     max_patches=300, random_state=np.random.RandomState())
            data = tmp.reshape(tmp.shape[0], -1)
            data = data-np.mean(data, axis=0)
            data = data/np.std(data, axis=0)
            code = self.dico.transform(data)
            results.append(code.sum(axis=0))
        return np.vstack(results)
    
    def get_params(self, deep=True):
        return {"codebook_size": self.codebook_size}
Author: AI42, Project: CNN-detection-tracking, Lines: 83, Source: BOVW.py

Example 4: enumerate

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
V = mbdic.components_
plt.figure(figsize=(15,12))
for i,comp in enumerate(V):
    plt.subplot(10,10,i+1)
    plt.imshow(comp.reshape(patchsize).T,origin='lower',interpolation='nearest',aspect='auto',cmap='viridis')
    


# ### Reconstruct some data with the dictionary

# In[22]:

transform_algorithm = 'omp'
test_patches = np.reshape(extract_patches_2d(np.log(feature_list[1][500:1000,:]),(16,48)),(-1,16*48))
code = mbdic.transform(test_patches)
reconstructed_patches = np.dot(code, mbdic.components_)


# In[23]:

test_patches = np.reshape(test_patches,(-1,16,48))
reconstructed_patches = np.reshape(reconstructed_patches,(-1,16,48))


# In[12]:

i = 100
imin = min(np.min(test_patches[i]),np.min(reconstructed_patches[i]))
imax = max(np.max(test_patches[i]),np.max(reconstructed_patches[i]))
plt.figure(figsize=(12,4))
Author: mjboos, Project: auditoryBSC, Lines: 32, Source: AuditoryBSC.py

Example 5: imageDenoisingTest01

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]

#......... some code omitted here .........
	print "Learning the dictionary"
	t0 = time()
	# This step learns the dictionary from the patches
	# Create a new model
	dico = MiniBatchDictionaryLearning(n_components = 100, alpha = 1, n_iter = 5000)

	print data.shape  # data is a 30500 x 49 matrix
	V = dico.fit(data).components_

	print V.shape  # V is a 100 x 49 matrix
	dt = time() - t0

	print "done in %.2fs." % dt

	plt.figure(figsize = (4.2, 4))
	for i, comp in enumerate(V[:100]):
		plt.subplot(10, 10, i + 1)
		plt.imshow(comp.reshape(patch_size), cmap = plt.cm.gray_r, interpolation = "nearest")
		plt.xticks(())
		plt.yticks(())

	plt.suptitle("Dictionary learned from lena patches\n" + "Train time %.1fs on %d patches" % (dt, len(data)), fontsize = 16)

	plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)

	def show_with_diff(image, reference, title):
		plt.figure(figsize = (5, 3.3))
		plt.subplot(1, 2, 1)
		plt.title('Image')
		plt.imshow(image, vmin = 0, vmax = 1, cmap = plt.cm.gray, interpolation = "nearest")

		plt.xticks(())
		plt.yticks(())
		plt.subplot(1,2,2)

		difference = image - reference

		plt.title("difference (norm: %.2f)" % np.sqrt(np.sum(difference ** 2)))

		plt.imshow(difference, vmin = -0.5, vmax = 0.5, cmap = plt.cm.PuOr, interpolation = "nearest")
		plt.xticks(())
		plt.yticks(())
		plt.suptitle(title, size = 16)

		plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.02)


	show_with_diff(distorted, lena, "Distorted Image")




	#plt.show()

	#Extract noisy patches and reconstruct them using the dictionary
	# Extract patches from the right half of the image
	print('Extracting noisy patches...')
	t0 = time()
	data = extract_patches_2d(distorted[:, height//2:], patch_size)
	data = data.reshape(data.shape[0], -1)
	intercept = np.mean(data, axis = 0)
	data -= intercept

	print "done in %.2fs. " % (time() - t0)

	transform_algorithms = [('Orthogonal Matching Pursuit\n1 atom', 'omp',
							{'transform_n_nonzero_coefs': 1}),
							('Orthogonal Matching Pursuit\n2 atoms', 'omp',
							{'transform_n_nonzero_coefs': 2}),
							('Least-angle regression\n5 atoms', 'lars',
							{'transform_n_nonzero_coefs': 5}),
							('Thresholding\n alpha = 0.1', 'threshold',
							{'transform_alpha': 0.1})]

	reconstructions = {}
	for title, transform_algorithm, kwargs in transform_algorithms:
		print title + "..."
		reconstructions[title] = lena.copy()
		t0 = time()
		dico.set_params(transform_algorithm = transform_algorithm, **kwargs)
		code = dico.transform(data)  # use the previously trained model to obtain the representation coefficients (the code)
		patches = np.dot(code, V)

		if transform_algorithm == "threshold":
			patches -= patches.min()
			patches /= patches.max()

		patches += intercept
		patches = patches.reshape(len(data), *patch_size)

		if transform_algorithm == "threshold":
			patches -= patches.min()
			patches /= patches.max()

		reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(patches, (width, height // 2))
		dt = time() - t0
		print "done in %.2fs." % dt
		show_with_diff(reconstructions[title], lena, title + '(time: %.1fs)' % dt)

	plt.show()
Author: hyliu0302, Project: scikit-learn-notes, Lines: 104, Source: myScikitLearnFcns.py

Example 6: range

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
print bestksorted	
"""

#"""
#final grid search sweep
bestK = {}
for i in range(2,4,1):
	K = i**2
	for i in range(-3,0,1):
		alpha = 10**(i)
		print ""
		print "\tK: ", K," alpha: ", alpha
		sc         = MiniBatchDictionaryLearning(n_components=K, alpha=alpha, batch_size=10, verbose=True,n_iter=100).fit(data_noisy)
		components = sc.components_  #.reshape((K, h, w))

		code = sc.transform(data_noisy)
		patches = np.dot(code, components)
		data_denoised =patches
		img_denoised  = util.patchToImage(util.vectorsToPatches(data_denoised,patch_size),(900,1200))
		bestK["K:"+str(K)+"_a:"+str(alpha)] = util.eval_recon(img_clean,img_denoised)

bestksorted = sorted(bestK.iteritems(), key=lambda x:x[1])
print ""
print bestksorted	

best_parameters = map(lambda s: s.split(":"), bestksorted[0][0].split("_"))
print best_parameters
#"""

#"""
##run the final model to output
Author: yongbin999, Project: pySpark_denoising, Lines: 33, Source: run_me_denoise.py

Example 7: get_dictionary_data

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
def get_dictionary_data(n_comp=20, zero_index=True):
    unlabeled = util.load_unlabeled_training(flatten=False)
    height, width = 32, 32
    n_images = 10000
    patch_size = (8, 8)

    unlabeled = util.standardize(unlabeled)
    np.random.shuffle(unlabeled)

    print('Extracting reference patches...')

    patches = np.empty((0, 64))
    t0 = time()

    for image in unlabeled[:n_images, :, :]:
        data = np.array(extract_patches_2d(image, patch_size, max_patches=0.10))
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0) + 1e-20
        patches = np.concatenate([patches, data])

    print('done in %.2fs.' % (time() - t0))

    # whiten the patches
    z = zca.ZCA()
    z.fit(patches)
    z.transform(patches)

    print('Learning the dictionary...')
    t0 = time()
    dico = MiniBatchDictionaryLearning(n_components=n_comp, alpha=1)
    V = dico.fit(patches).components_
    dt = time() - t0
    print('done in %.2fs.' % dt)

    #plt.figure(figsize=(4.2, 4))
    #for i, comp in enumerate(V[:100]):
    #    plt.subplot(10, 10, i + 1)
    #    plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
    #               interpolation='nearest')
    #    plt.xticks(())
    #    plt.yticks(())
    #plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
    #plt.show()

    labeled_data, labels = util.load_labeled_training(flatten=False, zero_index=True)
    labeled_data = util.standardize(labeled_data)

    test_data = util.load_all_test(flatten=False)
    test_data = util.standardize(test_data)

    #util.render_matrix(test_data, flattened=False)

    print('Training SVM with the training images...')
    t0 = time()
    reconstructed_images = np.empty((0, 64))
    multiplied_labels = np.empty((0))

    for i in range(len(labeled_data)):
        image = labeled_data[i, :, :]
        label = labels[i]
        data = extract_patches_2d(image, patch_size, max_patches=0.50)
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0) + 1e-20

        code = dico.transform(data)
        patches = np.dot(code, V)
        z.transform(patches)

        reconstructed_images = np.concatenate([reconstructed_images, patches])
        extended_labels = np.asarray([label] * len(patches))
        multiplied_labels = np.concatenate([multiplied_labels, extended_labels])

    print(reconstructed_images.shape, multiplied_labels.shape)
    svc = SVC()
    #print('Getting cross-val scores...')
    #scores = cross_validation.cross_val_score(svc, reconstructed_images, multiplied_labels, cv=10)
    #print('cross-val scores:', scores)
    #print('cross-val mean:', np.mean(scores))
    #print('cross-val variance:', np.var(scores))

    print('done in %.2fs.' % (time() - t0))

    svc.fit(reconstructed_images, multiplied_labels)

    print('Reconstructing the test images...')
    t0 = time()

    predictions = []

    for i, image in enumerate(test_data):
        data = extract_patches_2d(image, patch_size, max_patches=0.25)
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0) + 1e-20

        code = dico.transform(data)
        patches = np.dot(code, V)
        z.transform(patches)
#......... some code omitted here .........
Author: deepxkn, Project: facial-expression-recognition-1, Lines: 103, Source: linear_classifier_with_dictionary_data.py

Example 8: Sparsecode

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
class Sparsecode(BaseEstimator, TransformerMixin):
    def __init__(self, patch_file=None, patch_num=10000, patch_size=(16, 16),\
                n_components=384,  alpha = 1, n_iter=1000, batch_size=200):
        self.patch_num = patch_num
        self.patch_size = patch_size
        self.patch_file = patch_file
        
        self.n_components = n_components
        self.alpha = alpha #sparsity controlling parameter
        self.n_iter = n_iter
        self.batch_size = batch_size

    
    def fit(self, X=None, y=None):
        if self.patch_file is None:
            num = self.patch_num // X.size
            data = []
            for item in X:
                img = imread(str(item[0]))
                img = img_as_ubyte(rgb2gray(img))
                #img = self.binary(img) # binarization
                tmp = extract_patches_2d(img, self.patch_size, max_patches = num,\
                                        random_state=np.random.RandomState())
                data.append(tmp)
            
            data = np.vstack(data)
            data = data.reshape(data.shape[0], -1)
            data = np.asarray(data, 'float32')
        else:
            data = np.load(self.patch_file,'r+') # load npy file; note the mode, since the data is modified later
        
        data = np.require(data, dtype=np.float32)
        
        # Standardization
        #logging.info("Pre-processing : Standardization...")
        #self.standard = StandardScaler()
        #data = self.standard.fit_transform(data)
            
        # whiten
        #logging.info("Pre-processing : PCA Whiten...")
        #self.pca = RandomizedPCA(copy=True, whiten=True)
        #data = self.pca.fit_transform(data)
        
        # whiten
        logging.info("Pre-processing : ZCA Whiten...")
        self.zca = ZCA()
        data = self.zca.fit_transform(data)
        
        # 0-1 scaling can also be done with the preprocessing module
        #self.minmax = MinMaxScaler()
        #data = self.minmax.fit_transform(data)
        
        """k-means
        self.kmeans = MiniBatchKMeans(n_clusters=self.n_components, init='k-means++', \
                                    max_iter=self.n_iter, batch_size=self.batch_size, verbose=1,\
                                    tol=0.0, max_no_improvement=100,\
                                    init_size=None, n_init=3, random_state=np.random.RandomState(0),\
                                    reassignment_ratio=0.0001)
        logging.info("Sparse coding : Phase 1 - Codebook learning (K-means).")
        self.kmeans.fit(data)
        
        logging.info("Sparse coding : Phase 2 - Define coding method (omp,lars...).")
        self.coder = SparseCoder(dictionary=self.kmeans.cluster_centers_, 
                                 transform_n_nonzero_coefs=256,
                                 transform_alpha=None, 
                                 transform_algorithm='lasso_lars',
                                 n_jobs = 1)
        """
        #'''genertic
        logging.info("Sparse coding...")
        self.coder = MiniBatchDictionaryLearning(n_components=self.n_components, \
                                           alpha=self.alpha, n_iter=self.n_iter, \
                                           batch_size =self.batch_size, verbose=True)
        self.coder.fit(data)
        self.coder.transform_algorithm = 'omp'
        self.coder.transform_alpha = 0.1 # with omp, this represents the reconstruction error
        #'''
        return self
    
    def transform(self, X):
        #whiten
        #X_whiten = self.pca.transform(X)
        logging.info("Compute the sparse coding of X.")
        X = np.require(X, dtype=np.float32)
        
        #TODO: is it strictly necessary to fit before transform?
        #X = self.minmax.fit_transform(X)
        
        # -mean/std and whiten
        #X = self.standard.transform(X)
        #X = self.pca.transform(X)
        
        # ZCA
        X = self.zca.transform(X)

        # MiniBatchDictionaryLearning
        # return self.dico.transform(X_whiten)
        
        # k-means
        # TODO: sparse coder method? problem...
#......... some code omitted here .........
Author: AI42, Project: CNN-detection-tracking, Lines: 103, Source: SparseCode.py

Example 9: get_dictionary_data

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
def get_dictionary_data(n_comp=20, zero_index=False):
    unlabeled = util.load_unlabeled_training(flatten=False)
    height, width = 32, 32
    n_images = 10000
    patch_size = (8, 8)

    unlabeled = util.standardize(unlabeled)
    np.random.shuffle(unlabeled)

    print('Extracting reference patches...')

    patches = np.empty((0, 64))
    t0 = time()

    for image in unlabeled[:n_images, :, :]:
        data = np.array(extract_patches_2d(image, patch_size, max_patches=0.01))
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0) + 1e-20
        patches = np.concatenate([patches, data])

    print('done in %.2fs.' % (time() - t0))

    # whiten the patches
    z = zca.ZCA()
    z.fit(patches)
    z.transform(patches)

    print('Learning the dictionary...')
    t0 = time()
    dico = MiniBatchDictionaryLearning(n_components=n_comp, alpha=1)
    V = dico.fit(patches).components_
    dt = time() - t0
    print('done in %.2fs.' % dt)

    #plt.figure(figsize=(4.2, 4))
    #for i, comp in enumerate(V[:100]):
    #    plt.subplot(10, 10, i + 1)
    #    plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
    #               interpolation='nearest')
    #    plt.xticks(())
    #    plt.yticks(())
    #plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
    #plt.show()

    labeled_data, labels = util.load_labeled_training(flatten=False, zero_index=True)
    labeled_data = util.standardize(labeled_data)

    test_data = util.load_all_test(flatten=False)
    test_data = util.standardize(test_data)

    #util.render_matrix(test_data, flattened=False)

    print('Reconstructing the training images...')
    t0 = time()
    reconstructed_images = np.empty((0, 32, 32))

    for i, image in enumerate(labeled_data):
        data = extract_patches_2d(image, patch_size)
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0) + 1e-20

        code = dico.transform(data)
        patches = np.dot(code, V)
        z.transform(patches)
        patches = patches.reshape(len(data), *patch_size)

        data = reconstruct_from_patches_2d(patches, (width, height))
        data = data.reshape(1, 32, 32)
        reconstructed_images = np.concatenate([reconstructed_images, data])

    print('done in %.2fs.' % (time() - t0))

    # flatten
    n, x, y = reconstructed_images.shape
    training_images = reconstructed_images.reshape(reconstructed_images.shape[0], reconstructed_images.shape[1]*reconstructed_images.shape[2])
    assert training_images.shape == (n, x*y)

    print('Reconstructing the test images...')
    t0 = time()
    reconstructed_test_images = np.empty((0, 32, 32))

    for image in test_data:
        data = extract_patches_2d(image, patch_size)
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0) + 1e-20

        code = dico.transform(data)
        patches = np.dot(code, V)
        z.transform(patches)
        patches = patches.reshape(len(data), *patch_size)

        data = reconstruct_from_patches_2d(patches, (width, height))
        data = data.reshape(1, 32, 32)
        reconstructed_test_images = np.concatenate([reconstructed_test_images, data])

    print('done in %.2fs.' % (time() - t0))

#......... some code omitted here .........
Author: deepxkn, Project: facial-expression-recognition-1, Lines: 103, Source: dictionary_learning.py

Example 10: components

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
			# Display the basis components (aka the dictionary).
			pylab.ion()
			pylab.show()
			display(fit)
	if (not restart) or steps[restart] <= steps['COMPUTE_PROJ']:
		with Timer("Compute projection ..."):
			# Project the input patches onto the basis using Orthonormal Matching Pursuit with 2 components.
			model.set_params(transform_algorithm = 'omp', transform_n_nonzero_coefs = N_ATOMS)
			# the intention is simply this:
			#	code = model.transform(data)
			# but we chunk it up and store it in a sparse matrix for efficiency
			code = []
			CHUNK = 1000
			for i in xrange(0, len(data), CHUNK):
				data_i = data[i:i + CHUNK]
				code_i = model.transform(data_i)
				code_i = csr_matrix(code_i)
				code.append(code_i)
			code = sparse.vstack(code)
	if (not restart) or steps[restart] <= steps['RECONSTRUCT']:
		with Timer("Reconstruct images ..."):
			# Reconstruct the input images from the projected patches.
			basis = fit.components_ 
			proj = code.dot(basis)
			proj *= std
			proj += mean
			proj = proj.reshape(len(proj), SAMP_WIDTH, SAMP_HEIGHT)
			approxs = [ ]
			errs = [ ]
			for (master, i1, i2) in zip(masters, idx[:-1], idx[1:]):
				approx = reconstruct_from_patches_2d(proj[i1:i2], master.size[::-1])
Author: gsidier, Project: ffoct, Lines: 33, Source: ffoct.py

Example 11: fit

# Required import: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import transform [as alias]
    def fit(self, X, y, **dump_kwargs):
        if self.debug_folder is not None:
            self.dump_init()
        X_ref = self.fm_decoder.fm_to_csr(X, y)
        n_iter = X_ref.shape[0] * self.n_epochs // self.batch_size

        random_state = check_random_state(self.random_state)
        dict_init = random_state.randn(self.n_components, X_ref.shape[1])

        dict_learning = MiniBatchDictionaryLearning(
                n_components=self.n_components,
                alpha=self.alpha,
                transform_alpha=self.alpha,
                fit_algorithm=self.algorithm,
                transform_algorithm=self.algorithm,
                dict_init=dict_init,
                l1_ratio=self.l1_ratio,
                batch_size=self.batch_size,
                shuffle=True,
                fit_intercept=self.fit_intercept,
                n_iter=n_iter,
                missing_values=0,
                learning_rate=self.learning_rate,
                learning_rate_offset=self.learning_rate_offset,
                verbose=3,
                debug_info=self.debug_folder is not None,
                random_state=self.random_state)

        if self.fit_intercept:
            self.dictionary_ = np.r_[np.ones((1, dict_init.shape[1])),
                                     dict_init]
            self.code_ = np.zeros((X.shape[0], self.n_components + 1))
        else:
            self.dictionary_ = dict_init
            self.code_ = np.zeros((X.shape[0], self.n_components))

        if self.debug_folder is None:
            (X_csr, self.global_mean_,
             self.sample_mean_, self.feature_mean_) = csr_center_data(X_ref)
            for i in range(self.n_epochs):
                dict_learning.partial_fit(X_csr, deprecated=False)
                if self.decreasing_batch_size:
                    dict_learning.set_params(batch_size=
                                             dict_learning.batch_size // 2)
            self.n_iter_ = dict_learning.n_iter_
            self.dictionary_ = dict_learning.components_
            self.code_ = dict_learning.transform(X_csr)

        if self.debug_folder is not None:
            (X_csr, self.global_mean_,
             self.sample_mean_, self.feature_mean_) = csr_center_data(X_ref)
            self.dump_inter(**dump_kwargs)

            for i in range(self.n_epochs):
                permutation = random_state.permutation(X_csr.shape[0])

                batches = gen_batches(X_csr.shape[0],
                                      X_csr.shape[0] // 5 + 1)
                last_seen = 0
                for batch in batches:
                    last_seen = max(batch.stop, last_seen)
                    dict_learning.partial_fit(X_csr[permutation[batch]],
                                              deprecated=False)
                    self.dictionary_ = dict_learning.components_
                    self.code_[permutation[:last_seen]] = dict_learning.\
                        transform(X_csr[permutation[:last_seen]])
                    self.n_iter_ = dict_learning.n_iter_
                    self.dump_inter(debug_dict=dict_learning.debug_info_,
                                    **dump_kwargs)
                if self.decreasing_batch_size:
                    dict_learning.set_params(batch_size=
                                             dict_learning.batch_size // 2)
            self.dictionary_ = dict_learning.components_
            self.code_ = dict_learning.transform(X_csr)
        return self
Author: arthurmensch, Project: scikit-learn-sandbox, Lines: 77, Source: dl_recommender.py


Note: The sklearn.decomposition.MiniBatchDictionaryLearning.transform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.