

Python MiniBatchDictionaryLearning.set_params Method Code Examples

This article collects typical usage examples of the Python method sklearn.decomposition.MiniBatchDictionaryLearning.set_params. If you are unsure what MiniBatchDictionaryLearning.set_params does or how to use it, the curated code examples below should help. You can also explore the other usage examples for sklearn.decomposition.MiniBatchDictionaryLearning.


The following presents 5 code examples of MiniBatchDictionaryLearning.set_params, ordered by popularity by default.
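Before the examples, here is a minimal, self-contained sketch of the pattern they all share: fit the dictionary once, then call set_params to switch the sparse-coding (transform) algorithm without refitting. The data and parameter values below are synthetic and purely illustrative.

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

# Synthetic data: 200 samples, 64 features (e.g. flattened 8x8 patches).
rng = np.random.RandomState(0)
X = rng.randn(200, 64)

# Fit the dictionary once.
dico = MiniBatchDictionaryLearning(n_components=30, alpha=1, random_state=0)
dico.fit(X)

# Swap the transform algorithm without refitting.
dico.set_params(transform_algorithm='omp', transform_n_nonzero_coefs=2)
code_omp = dico.transform(X)  # at most 2 nonzero coefficients per sample

dico.set_params(transform_algorithm='threshold', transform_alpha=0.1)
code_thr = dico.transform(X)

print(code_omp.shape, code_thr.shape)  # (200, 30) (200, 30)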

Example 1: print

# Required module: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import set_params [as alias]
transform_algorithms = [
    ('Orthogonal Matching Pursuit\n1 atom', 'omp',
     {'transform_n_nonzero_coefs': 1}),
    ('Orthogonal Matching Pursuit\n2 atoms', 'omp',
     {'transform_n_nonzero_coefs': 2}),
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]

reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = lena.copy()
    t0 = time()
    # Switch the sparse-coding algorithm on the fitted dictionary.
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    code = dico.transform(data)
    patches = np.dot(code, V)  # reconstruct patches from codes and atoms V

    if transform_algorithm == 'threshold':
        # Thresholding does not preserve scale; rescale patches to [0, 1].
        patches -= patches.min()
        patches /= patches.max()

    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        # Rescale once more after re-adding the per-feature mean.
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
        patches, (width, height // 2))
    dt = time() - t0
Developer: heidekrueger, Project: CaseStudiesMachineLearning, Lines of code: 32, Source: sklearn_example.py

Example 2: BoVWFeature

# Required module: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import set_params [as alias]
class BoVWFeature(TransformerMixin):
    """
    Extract bag-of-visual-words (BoVW) features.

    Parameters
    ----------
    codebook_size : int
        Size of the codebook (visual vocabulary), default: 512.

    method : str
        Codebook learning method: 'sc' (sparse coding) or 'km' (k-means).

    """
    def __init__(self, codebook_size=512, method='sc'):
        self.codebook_size = codebook_size
        self.method = method
        self.patch_num = 40000
        self.patch_size = 8
        self.sample = 'random'
        self.feature = 'raw' # raw, surf, hog

    
    def fit(self, X, y=None):
        # Learn the codebook from patches sampled across all images.
        print('Extracting patches...')
        patches = []
        num = self.patch_num // X.size  # patches to sample per image
        for x in X:
            img = imread(str(x[0]))
            tmp = extract_patches_2d(img, (self.patch_size, self.patch_size),
                                     max_patches=num, random_state=np.random.RandomState())
            patches.append(tmp)
        data = np.vstack(patches)
        data = data.reshape(data.shape[0], -1)
        
        # Center and scale each feature dimension.
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0)
        
        print('Learning codebook...')
        if self.method == 'sc':
            self.dico = MiniBatchDictionaryLearning(n_components=self.codebook_size,
                                                    alpha=1, n_iter=100, batch_size=100, verbose=True)
            self.dico.fit(data)
        elif self.method=='km':
            # self.dico = MiniBatchKMeans(n_clusters=self.codebook_size)
            pass
        
        return self
    
    def transform(self, X):
        """
        Parameters
        ----------
        X : {array-like}, shape = [n_samples, 1]
            Input vectors, where n_samples is the number of samples and
            the single column holds the image path.

        Returns
        -------
        array-like, shape = [n_samples, codebook_size]
            BoVW feature vector for each sample (the sparse codes of its
            patches, summed).

        """
        print('Extracting feature...')
        # Configure the learned dictionary's transform algorithm.
        self.dico.set_params(transform_algorithm='lars')
        results = []
        for sample in X:
            img = imread(str(sample[0]))
            tmp = extract_patches_2d(img, (self.patch_size, self.patch_size),
                                     max_patches=300, random_state=np.random.RandomState())
            data = tmp.reshape(tmp.shape[0], -1)
            data = data - np.mean(data, axis=0)
            data = data / np.std(data, axis=0)
            code = self.dico.transform(data)
            results.append(code.sum(axis=0))
        return np.vstack(results)
    
    def get_params(self, deep=True):
        return {"codebook_size": self.codebook_size}
Developer: AI42, Project: CNN-detection-tracking, Lines of code: 83, Source: BOVW.py
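A hypothetical usage sketch for the transformer above, assuming the BoVWFeature class is in scope and the listed image files exist on disk (the paths are invented for illustration):

import numpy as np

# Each row holds a single image path, matching the shape fit() expects.
X_train = np.array([['img/train_001.jpg'], ['img/train_002.jpg']])
X_test = np.array([['img/test_001.jpg']])

bovw = BoVWFeature(codebook_size=256, method='sc')
features = bovw.fit(X_train).transform(X_test)  # shape: (1, 256)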

Example 3: Timer

# Required module: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import set_params [as alias]
		with Timer("Fitting model ..."):
			# Fit the sparse model using Dictionary Learning.
			cols = ceil(sqrt(N_COMP))
			rows = ceil(N_COMP / float(cols))
			model = MiniBatchDictionaryLearning(n_components = N_COMP, alpha = 1)
			fit = model.fit(data)
	if (not restart) or steps[restart] <= steps['DISPLAY_BASIS']:
		with Timer("Display components ..."): 
			# Display the basis components (aka the dictionary).
			pylab.ion()
			pylab.show()
			display(fit)
	if (not restart) or steps[restart] <= steps['COMPUTE_PROJ']:
		with Timer("Compute projection ..."):
			# Project the input patches onto the basis using Orthogonal Matching Pursuit with N_ATOMS nonzero coefficients.
			model.set_params(transform_algorithm = 'omp', transform_n_nonzero_coefs = N_ATOMS)
			# the intention is simply this:
			#	code = model.transform(data)
			# but we chunk it up and store it in a sparse matrix for efficiency
			code = []
			CHUNK = 1000
			for i in range(0, len(data), CHUNK):
				data_i = data[i:i + CHUNK]
				code_i = model.transform(data_i)
				code_i = csr_matrix(code_i)
				code.append(code_i)
			code = sparse.vstack(code)
	if (not restart) or steps[restart] <= steps['RECONSTRUCT']:
		with Timer("Reconstruct images ..."):
			# Reconstruct the input images from the projected patches.
			basis = fit.components_ 
Developer: gsidier, Project: ffoct, Lines of code: 33, Source: ffoct.py
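The chunking pattern in example 3 is useful on its own: OMP codes are mostly zeros, so converting each transformed chunk to a CSR matrix keeps memory bounded. A minimal, self-contained sketch (data, chunk size, and parameters are illustrative):

import numpy as np
from scipy import sparse
from scipy.sparse import csr_matrix
from sklearn.decomposition import MiniBatchDictionaryLearning

rng = np.random.RandomState(0)
data = rng.randn(5000, 49)

model = MiniBatchDictionaryLearning(n_components=64, alpha=1, random_state=0)
model.fit(data)
model.set_params(transform_algorithm='omp', transform_n_nonzero_coefs=2)

# Transform in chunks and keep the sparse codes in a compact CSR matrix.
CHUNK = 1000
chunks = [csr_matrix(model.transform(data[i:i + CHUNK]))
          for i in range(0, len(data), CHUNK)]
code = sparse.vstack(chunks)
print(code.shape, code.nnz)  # (5000, 64); at most 2 nonzeros per row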

Example 4: imageDenoisingTest01

# Required module: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import set_params [as alias]

#......... part of the code omitted here .........
	print "Learning the dictionary"
	t0 = time()
	#这一步是开始对patches进行学习
	#new 一个model
	dico = MiniBatchDictionaryLearning(n_components = 100, alpha = 1, n_iter = 5000)

	print data.shape  #data是30500 * 49维矩阵
	V = dico.fit(data).components_

	print V.shape #V是100 * 49维矩阵
	dt = time() - t0

	print "done in %.2fs." % dt

	plt.figure(figsize = (4.2, 4))
	for i, comp in enumerate(V[:100]):
		plt.subplot(10, 10, i + 1)
		plt.imshow(comp.reshape(patch_size), cmap = plt.cm.gray_r, interpolation = "nearest")
		plt.xticks(())
		plt.yticks(())

	plt.suptitle("Dictionary learned from lena patches\n" + "Train time %.1fs on %d patches" % (dt, len(data)), fontsize = 16)

	plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)

	def show_with_diff(image, reference, title):
		plt.figure(figsize = (5, 3.3))
		plt.subplot(1, 2, 1)
		plt.title('Image')
		plt.imshow(image, vmin = 0, vmax = 1, cmap = plt.cm.gray, interpolation = "nearest")

		plt.xticks(())
		plt.yticks(())
		plt.subplot(1,2,2)

		difference = image - reference

		plt.title("difference (norm: %.2f)" % np.sqrt(np.sum(difference ** 2)))

		plt.imshow(difference, vmin = -0.5, vmax = 0.5, cmap = plt.cm.PuOr, interpolation = "nearest")
		plt.xticks(())
		plt.yticks(())
		plt.suptitle(title, size = 16)

		plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.02)


	show_with_diff(distorted, lena, "Distorted Image")




	#plt.show()

	# Extract noisy patches and reconstruct them using the dictionary.
	# The patches are taken from the right half of the image.
	print('Extracting noisy patches...')
	t0 = time()
	data = extract_patches_2d(distorted[:, height//2:], patch_size)
	data = data.reshape(data.shape[0], -1)
	intercept = np.mean(data, axis = 0)
	data -= intercept

	print("done in %.2fs. " % (time() - t0))

	transform_algorithms = [('Orthogonal Matching Pursuit\n1 atom', 'omp',
							{'transform_n_nonzero_coefs': 1}),
							('Orthogonal Matching Pursuit\n2 atoms', 'omp',
							{'transform_n_nonzero_coefs': 2}),
							('Least-angle regression\n5 atoms', 'lars',
							{'transform_n_nonzero_coefs': 5}),
							('Thresholding\n alpha = 0.1', 'threshold',
							{'transform_alpha': 0.1})]

	reconstructions = {}
	for title, transform_algorithm, kwargs in transform_algorithms:
		print(title + "...")
		reconstructions[title] = lena.copy()
		t0 = time()
		dico.set_params(transform_algorithm = transform_algorithm, **kwargs)
		code = dico.transform(data)  # use the fitted dictionary to obtain the sparse coefficients (the code)
		patches = np.dot(code, V)

		if transform_algorithm == "threshold":
			patches -= patches.min()
			patches /= patches.max()

		patches += intercept
		patches = patches.reshape(len(data), *patch_size)

		if transform_algorithm == "threshold":
			patches -= patches.min()
			patches /= patches.max()

		reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(patches, (width, height // 2))
		dt = time() - t0
		print "done in %.2fs." % dt
		show_with_diff(reconstructions[title], lena, title + '(time: %.1fs)' % dt)

	plt.show()
Developer: hyliu0302, Project: scikit-learn-notes, Lines of code: 104, Source: myScikitLearnFcns.py

Example 5: fit

# Required module: from sklearn.decomposition import MiniBatchDictionaryLearning [as alias]
# Or: from sklearn.decomposition.MiniBatchDictionaryLearning import set_params [as alias]
    def fit(self, X, y, **dump_kwargs):
        if self.debug_folder is not None:
            self.dump_init()
        X_ref = self.fm_decoder.fm_to_csr(X, y)
        n_iter = X_ref.shape[0] * self.n_epochs // self.batch_size

        random_state = check_random_state(self.random_state)
        dict_init = random_state.randn(self.n_components, X_ref.shape[1])

        dict_learning = MiniBatchDictionaryLearning(
                n_components=self.n_components,
                alpha=self.alpha,
                transform_alpha=self.alpha,
                fit_algorithm=self.algorithm,
                transform_algorithm=self.algorithm,
                dict_init=dict_init,
                l1_ratio=self.l1_ratio,
                batch_size=self.batch_size,
                shuffle=True,
                fit_intercept=self.fit_intercept,
                n_iter=n_iter,
                missing_values=0,
                learning_rate=self.learning_rate,
                learning_rate_offset=self.learning_rate_offset,
                verbose=3,
                debug_info=self.debug_folder is not None,
                random_state=self.random_state)

        if self.fit_intercept:
            self.dictionary_ = np.r_[np.ones((1, dict_init.shape[1])),
                                     dict_init]
            self.code_ = np.zeros((X.shape[0], self.n_components + 1))
        else:
            self.dictionary_ = dict_init
            self.code_ = np.zeros((X.shape[0], self.n_components))

        if self.debug_folder is None:
            (X_csr, self.global_mean_,
             self.sample_mean_, self.feature_mean_) = csr_center_data(X_ref)
            for i in range(self.n_epochs):
                dict_learning.partial_fit(X_csr, deprecated=False)
                if self.decreasing_batch_size:
                    dict_learning.set_params(
                        batch_size=dict_learning.batch_size // 2)
            self.n_iter_ = dict_learning.n_iter_
            self.dictionary_ = dict_learning.components_
            self.code_ = dict_learning.transform(X_csr)

        if self.debug_folder is not None:
            (X_csr, self.global_mean_,
             self.sample_mean_, self.feature_mean_) = csr_center_data(X_ref)
            self.dump_inter(**dump_kwargs)

            for i in range(self.n_epochs):
                permutation = random_state.permutation(X_csr.shape[0])

                batches = gen_batches(X_csr.shape[0],
                                      X_csr.shape[0] // 5 + 1)
                last_seen = 0
                for batch in batches:
                    last_seen = max(batch.stop, last_seen)
                    dict_learning.partial_fit(X_csr[permutation[batch]],
                                              deprecated=False)
                    self.dictionary_ = dict_learning.components_
                    self.code_[permutation[:last_seen]] = dict_learning.transform(
                        X_csr[permutation[:last_seen]])
                    self.n_iter_ = dict_learning.n_iter_
                    self.dump_inter(debug_dict=dict_learning.debug_info_,
                                    **dump_kwargs)
                if self.decreasing_batch_size:
                    dict_learning.set_params(
                        batch_size=dict_learning.batch_size // 2)
            self.dictionary_ = dict_learning.components_
            self.code_ = dict_learning.transform(X_csr)
        return self
Developer: arthurmensch, Project: scikit-learn-sandbox, Lines of code: 77, Source: dl_recommender.py
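Example 5 appears to target a modified scikit-learn: parameters such as missing_values, learning_rate, learning_rate_offset, debug_info, and the deprecated argument to partial_fit are not in the released API. The shrinking-batch-size schedule itself can still be sketched against the released API, where each partial_fit call consumes the array it is given as one mini-batch (all sizes below are illustrative):

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.utils import gen_batches

rng = np.random.RandomState(0)
X = rng.randn(2000, 49)

dl = MiniBatchDictionaryLearning(n_components=32, alpha=1, random_state=0)

batch_size = 200
for epoch in range(3):
    for sl in gen_batches(X.shape[0], batch_size):
        dl.partial_fit(X[sl])             # one mini-batch per call
    batch_size = max(1, batch_size // 2)  # halve the batch size each epoch

code = dl.transform(X)
print(code.shape)  # (2000, 32)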


Note: The sklearn.decomposition.MiniBatchDictionaryLearning.set_params examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by many programmers. The source code is copyright of its original authors; consult each project's license before using or redistributing it. Do not reproduce without permission.