

Python IncrementalPCA.transform Method Code Examples

This article collects typical usage examples of the Python method sklearn.decomposition.IncrementalPCA.transform. If you are unsure what IncrementalPCA.transform does, how to call it, or how it is used in practice, the curated examples below may help. You can also explore further usage examples of the class it belongs to, sklearn.decomposition.IncrementalPCA.


The following presents 15 code examples of the IncrementalPCA.transform method, sorted by popularity by default.
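
Before diving into the collected examples, here is a minimal, self-contained sketch of the typical IncrementalPCA workflow (fit in batches with partial_fit, then project with transform). The array sizes and component count below are illustrative only and are not taken from any of the examples that follow.

import numpy as np
from sklearn.decomposition import IncrementalPCA

# Illustrative data: 1000 samples with 64 features, fitted in 5 batches of 200 rows.
X = np.random.RandomState(0).rand(1000, 64)
ipca = IncrementalPCA(n_components=10)
for batch in np.array_split(X, 5):
    ipca.partial_fit(batch)          # update the model one batch at a time

X_reduced = ipca.transform(X)        # project onto the learned components
print(X_reduced.shape)               # (1000, 10)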

Example 1: performPCA

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
import numpy as np
from os import listdir
from scipy.misc import imread  # assumed source of imread in the original project

def performPCA(source, num_components, chunk_size):
  image_paths = sorted(listdir(source), key=lambda x: (int(x.split('_')[0]), x.split('_')[1]))
  size, images = 0, []
  n_chunks = len(image_paths)//chunk_size
  pca = IncrementalPCA(n_components=num_components, batch_size=chunk_size)

  # Read in all images and do a partial fit on the PCA model.
  for i in range(n_chunks):
    print('Chunk:', i, 'Index:', i * chunk_size + size)
    while size < chunk_size:
      images.append(imread(source+image_paths[i * chunk_size + size]).flatten())
      size += 1

    pca.partial_fit(np.asarray(images))
    size, images = 0, []

    if i == n_chunks - 1:
      i += 1
      while i * chunk_size + size < len(image_paths):
        images.append(imread(source+image_paths[i * chunk_size + size]).flatten())
        size += 1
      pca.partial_fit(np.asarray(images))

  # Optional diagnostics:
  #print("\nExplained variance ratios: {0}".format(pca.explained_variance_ratio_))
  #print("Sum of variance captured by components: {0}\n".format(sum(pca.explained_variance_ratio_)))

  xTransformed = None

  # Read in all images again and transform them using the PCA model.
  for i in range(n_chunks):
    while size < chunk_size:
      images.append(imread(source+image_paths[i * chunk_size + size]).flatten())
      size += 1
    print('Chunk:', i, 'index:', i * chunk_size + size)
    transformed = pca.transform(np.asarray(images))
    if xTransformed is None:
      xTransformed = transformed
    else:
      xTransformed = np.vstack((xTransformed, transformed))
    size, images = 0, []

    if i == n_chunks - 1:
      i += 1
      while i * chunk_size + size < len(image_paths):
        images.append(imread(source+image_paths[i * chunk_size + size]).flatten())
        size += 1
      transformed = pca.transform(np.asarray(images))
      xTransformed = np.vstack((xTransformed, transformed))

  print "\nTransformed matrix shape:", xTransformed.shape
  return xTransformed
Author: Chenrongjing, Project: DiabeticRetinopathy, Lines: 54, Source: preprocessing.py

Example 2: ipca

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
def ipca(mov, components=50, batch=1000):
    # vectorize the images
    num_frames, h, w = mov.shape
    frame_size = h * w
    frame_samples = np.reshape(mov, (num_frames, frame_size)).T
    
    # run IPCA to approximate the SVD
    
    ipca_f = IncrementalPCA(n_components=components, batch_size=batch)
    ipca_f.fit(frame_samples)
    
    # construct the reduced version of the movie vectors using only the 
    # principal component projection
    
    proj_frame_vectors = ipca_f.inverse_transform(ipca_f.transform(frame_samples))
        
    # get the temporal principal components (pixel time series) and 
    # associated singular values
    
    eigenseries = ipca_f.components_.T

    # the rows of eigenseries are approximately orthogonal
    # so we can approximately obtain eigenframes by multiplying the 
    # projected frame matrix by this transpose on the right
    
    eigenframes = np.dot(proj_frame_vectors, eigenseries)

    return eigenseries, eigenframes, proj_frame_vectors        
Author: agiovann, Project: pyfluo, Lines: 30, Source: segmentation.py

Example 3: create_pool_pca_from_files

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
def create_pool_pca_from_files(file_dir, dir_output, s, t, i):
    import numpy as np
    from sklearn.decomposition import IncrementalPCA
    ipca = IncrementalPCA(n_components=number_dim_pca)  # number_dim_pca: module-level constant in the original file
    # First pass: incrementally fit the PCA on the first 4096 feature columns of each file.
    for counter in range(s, t, i):
        features_file = np.load(file_dir + '/pca' + str(counter) + '_code.npy')
        ipca.partial_fit(features_file[:, 0:4096])
    # Second pass: transform each file and save the reduced features with the trailing columns reattached.
    for counter in range(s, t, i):
        out_file = dir_output + 'pca_red_' + str(counter) + '_code.npy'
        features_file = np.load(file_dir + '/pca' + str(counter) + '_code.npy')
        features_red = ipca.transform(features_file[:, 0:4096])
        np.save(out_file, np.append(features_red, features_file[:, 4096:], axis=1))
Author: GMNetto, Project: CS2951t_Project, Lines: 13, Source: build_analysis.py

Example 4: PCA

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
import itertools
import time
import numpy as np
from multiprocessing import Pool
from os import listdir
from scipy.misc import imread  # assumed source of imread in the original project
from sklearn.decomposition import IncrementalPCA

def PCA(source, num_components, chunk_size):
    image_path = sorted(listdir(source), key=lambda x: (int(x.split('_')[0]), x.split('_')[1]))
    size, images = 0, []
    n_chunks = len(image_path)//chunk_size
    pca = IncrementalPCA(n_components=num_components, batch_size=chunk_size)

    # First pass: incrementally fit the PCA model chunk by chunk.
    for i in range(n_chunks):
        print('Chunk:', i, '\tIndex:', i * chunk_size + size)
        while size < chunk_size:
            images.append(imread(source+image_path[i * chunk_size + size]).flatten())
            size += 1
        pca.partial_fit(np.asarray(images))
        size, images = 0, []

        if i == n_chunks - 1:
            i += 1
            while i * chunk_size + size < len(image_path):
                images.append(imread(source+image_path[i * chunk_size + size]).flatten())
                size += 1
            pca.partial_fit(np.asarray(images))
            size, images = 0, []

    # Second pass: transform each chunk and stack the results.
    xTransformed = None
    for i in range(n_chunks):
        while size < chunk_size:
            images.append(imread(source+image_path[i * chunk_size + size]).flatten())
            size += 1
        print('Chunk:', i, '\tIndex:', i * chunk_size + size)
        transformed = pca.transform(np.asarray(images))
        xTransformed = transformed if xTransformed is None else np.vstack((xTransformed, transformed))
        size, images = 0, []

        if i == n_chunks - 1:
            i += 1
            while i * chunk_size + size < len(image_path):
                images.append(imread(source+image_path[i * chunk_size + size]).flatten())
                size += 1
            transformed = pca.transform(np.asarray(images))
            xTransformed = np.vstack((xTransformed, transformed))

    print("\nTransformed matrix shape:", xTransformed.shape)
    return xTransformed

if __name__ == "__main__":
    # imageResize is defined elsewhere in the original preprocessingv2.py
    source = './train/right'
    new_size = '32x32'
    pool = Pool()
    start = time.time()
    pool.map(imageResize, zip(itertools.repeat(source), listdir(source), itertools.repeat(new_size)))
    print("Resized Images in {0} seconds".format(time.time() - start))
Author: Chenrongjing, Project: DiabeticRetinopathy, Lines: 40, Source: preprocessingv2.py

Example 5: test_incremental_pca_inverse

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
def test_incremental_pca_inverse():
    """Test that the projection of data can be inverted."""
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean

    # check that we can recover the original data from the transformed
    # signal (since the data is almost of rank n_components)
    ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    Y = ipca.transform(X)
    Y_inverse = ipca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
Author: 0x0all, Project: scikit-learn, Lines: 16, Source: test_incremental_pca.py

Example 6: test_singular_values

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
def test_singular_values():
    # Check that the IncrementalPCA output has the correct singular values

    rng = np.random.RandomState(0)
    n_samples = 1000
    n_features = 100

    X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
                                      effective_rank=10, random_state=rng)

    pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X)
    ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
    assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)

    # Compare to the Frobenius norm
    X_pca = pca.transform(X)
    X_ipca = ipca.transform(X)
    assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
                              np.linalg.norm(X_pca, "fro")**2.0, 12)
    assert_array_almost_equal(np.sum(ipca.singular_values_**2.0),
                              np.linalg.norm(X_ipca, "fro")**2.0, 2)

    # Compare to the 2-norms of the score vectors
    assert_array_almost_equal(pca.singular_values_,
                              np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
    assert_array_almost_equal(ipca.singular_values_,
                              np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2)

    # Set the singular values and see what we get back
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 110

    X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
                                      effective_rank=3, random_state=rng)

    pca = PCA(n_components=3, svd_solver='full', random_state=rng)
    ipca = IncrementalPCA(n_components=3, batch_size=100)

    X_pca = pca.fit_transform(X)
    X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
    X_pca[:, 0] *= 3.142
    X_pca[:, 1] *= 2.718

    X_hat = np.dot(X_pca, pca.components_)
    pca.fit(X_hat)
    ipca.fit(X_hat)
    assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
    assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
Author: allefpablo, Project: scikit-learn, Lines: 51, Source: test_incremental_pca.py

Example 7: PCALDA

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
class PCALDA(AbstractFeature):
    def __init__(self,options):
        for key in options:
            setattr(self,key,options[key])

    def compute(self,X,y):
        if X.ndim == 3:
            X = X.reshape((X.shape[0],X.shape[1]*X.shape[2]))
        if not hasattr(self,"pca_dim"):
            self.pca_dim = len(X)-len(np.unique(y))

        # PCA
        self.ipca = IncrementalPCA(n_components=self.pca_dim, batch_size=None)
        self.ipca.fit(X)

        X_pca = self.ipca.transform(X)
        print("PCA train shape")
        print(X_pca.shape)

        # LDA (sklearn.lda.LDA is the legacy import path; newer scikit-learn
        # exposes it as sklearn.discriminant_analysis.LinearDiscriminantAnalysis)
        self.lda = sklearn.lda.LDA()
        self.lda.fit(X_pca,y)
        X_lda = self.lda.transform(X_pca)
        return X_lda


    def extract(self,x):
        X = np.array([x])
        if X.ndim == 3:
            X = X.reshape((X.shape[0],X.shape[1]*X.shape[2]))
        X_pca = self.ipca.transform(X)
        X_lda = self.lda.transform(X_pca)
        return list(X_lda[0])

    def __repr__(self):
        return "PCALDA"
Author: EthnoRec, Project: er-data-analysis, Lines: 38, Source: feature.py

Example 8: generate_pca_compression

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
def generate_pca_compression(X, n_components=16, batch_size=100):
    """
    Compresses the data using sklearn PCA implementation.

    :param X: Data (n_samples, n_features)
    :param n_components: Number of dimensions for PCA to keep
    :param batch_size: Batch size for incremental PCA

    :return: X_prime (the compressed representation), pca
    """

    pca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
    pca.fit(X)

    return pca.transform(X), pca
Author: TieSKey, Project: database_dcnn, Lines: 17, Source: compression.py
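
A quick usage sketch for the helper above; the random matrix is just a stand-in for real feature data:

import numpy as np
X = np.random.rand(500, 4096)                      # e.g. 500 samples of 4096-dim features
X_prime, pca = generate_pca_compression(X, n_components=16, batch_size=100)
print(X_prime.shape)                               # (500, 16)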

Example 9: __init__

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
class MyPCA:

	def __init__(self, filename=None):
		if not filename:
			self.model = IncrementalPCA(NUM_COMP)  # NUM_COMP: module-level constant in the original file
		else:
			with open(filename, 'rb') as f:  # binary mode so pickle.load works on Python 3
				self.model = pickle.load(f)

	def train(self, X):
		self.model.partial_fit(X)

	def transform(self, X):
		return self.model.transform(X)	

	def dump(self, filename):
		with open(filename, 'wb') as f:  # binary mode so pickle.dump works on Python 3
			pickle.dump(self.model, f)
Author: avg14, Project: galaxyzoo, Lines: 20, Source: mypca.py
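
A hypothetical round trip with the wrapper above (assuming the module-level constant NUM_COMP is defined, e.g. 32):

import numpy as np
model = MyPCA()
for batch in np.array_split(np.random.rand(1000, 64), 10):
    model.train(batch)                  # incremental fit, 100 rows per batch
reduced = model.transform(np.random.rand(5, 64))
model.dump('pca_model.pkl')             # serialize the fitted model
restored = MyPCA('pca_model.pkl')       # reload and reuse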

Example 10: test_whitening

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
def test_whitening():
    """Test that PCA and IncrementalPCA transforms match to sign flip."""
    X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
                                      effective_rank=2, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc,
                              batch_size=250).fit(X)

        Xt_pca = pca.transform(X)
        Xt_ipca = ipca.transform(X)
        assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
        Xinv_ipca = ipca.inverse_transform(Xt_ipca)
        Xinv_pca = pca.inverse_transform(Xt_pca)
        assert_almost_equal(X, Xinv_ipca, decimal=prec)
        assert_almost_equal(X, Xinv_pca, decimal=prec)
        assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
Author: 0x0all, Project: scikit-learn, Lines: 21, Source: test_incremental_pca.py

Example 11: ipca

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
	def ipca(self, X, n_components=100):
		from sklearn.decomposition import IncrementalPCA
		# trials = h5py.File(self.path + "/trials.hdf5", 'r')
		# scaled_meg = trials['scaled_meg'] # it's ok, the dataset is not fetched to memory yet
		# scaled_meeg = trials['scaled_meeg']

		n1 = X.shape[0] # how many rows we have in the dataset
		chunk_size = 1000 # how many rows we feed to IPCA at a time; should divide n1 evenly
		ipca = IncrementalPCA(n_components=n_components)

		for i in range(0, n1//chunk_size):
			print("{} to {} out of {}.".format(i*chunk_size,(i+1)*chunk_size,n1))
			print(X[i*chunk_size : (i+1)*chunk_size].shape)
			ipca.partial_fit(X[i*chunk_size : (i+1)*chunk_size])

		x = ipca.transform(X)
		print(x.shape)
		# n_comp = sum(i > 10.0e-05 for i in ipca.explained_variance_ratio_)
		# print(n_comp)
		return x
Author: pawarren, Project: neurofusion, Lines: 22, Source: wrangler.py

Example 12: PCASK

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
class PCASK(AbstractFeature):
    def __init__(self, n_components):
        AbstractFeature.__init__(self)
        self.n_components = n_components
        #for key in options:
            #setattr(self,key,options[key])

    def compute(self,X,y):
        if X.ndim == 3:
            X = X.reshape((X.shape[0],X.shape[1]*X.shape[2]))
        self.ipca = IncrementalPCA(n_components=self.n_components, batch_size=None)
        return self.ipca.fit_transform(X)


    def extract(self,X):
        if X.ndim == 2:
            X = X.reshape((X.shape[0]*X.shape[1]))
        return list(self.ipca.transform([X])[0])

    def __repr__(self):
        return "PCASK"
Author: EthnoRec, Project: er-data-analysis, Lines: 23, Source: feature.py

Example 13: IPCA

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
    def IPCA(self, components=50, batch=1000):
        '''
        Incremental Principal Component Analysis; see sklearn.decomposition.IncrementalPCA
        Parameters
        ----------
        components (default 50) = number of principal components to return
        batch (default 1000) = number of pixels to load into memory simultaneously in IPCA. More requires more memory but leads to a better fit
        Returns
        -------
        eigenseries: principal components (pixel time series) and associated singular values
        eigenframes: obtained by multiplying the projected frame matrix by the projected movie (whitened frames?)
        proj_frame_vectors: the reduced version of the movie vectors using only the principal component projection
        '''
        # vectorize the images
        num_frames, h, w = np.shape(self)
        frame_size = h * w
        frame_samples = np.reshape(self, (num_frames, frame_size)).T

        # run IPCA to approximate the SVD
        ipca_f = IncrementalPCA(n_components=components, batch_size=batch)
        ipca_f.fit(frame_samples)

        # construct the reduced version of the movie vectors using only the
        # principal component projection

        proj_frame_vectors = ipca_f.inverse_transform(ipca_f.transform(frame_samples))

        # get the temporal principal components (pixel time series) and
        # associated singular values

        eigenseries = ipca_f.components_.T

        # the rows of eigenseries are approximately orthogonal
        # so we can approximately obtain eigenframes by multiplying the
        # projected frame matrix by this transpose on the right

        eigenframes = np.dot(proj_frame_vectors, eigenseries)

        return eigenseries, eigenframes, proj_frame_vectors
Author: agiovann, Project: Constrained_NMF, Lines: 41, Source: movies.py

Example 14: project

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
    def project(self, ndim=None):
        """ Projects the data object given to the constructor onto `ndim` dimensions

        Parameters
        ----------
        ndim : int
            The number of dimensions we want to project the data on.

        Returns
        -------
        dataproj : :class:`MetricData <htmd.metricdata.MetricData>` object
            A new :class:`MetricData <htmd.metricdata.MetricData>` object containing the projected data

        Example
        -------
        >>> gw = GWPCA(data)
        >>> dataproj = gw.project(5)
        """
        from sklearn.decomposition import IncrementalPCA
        from htmd.progress.progress import ProgressBar
        from htmd.metricdata import MetricData

        pca = IncrementalPCA(n_components=ndim, batch_size=10000)
        p = ProgressBar(len(self.data.dat))
        for d in self.data.dat:
            pca.partial_fit(d * self.weights)
            p.progress()
        p.stop()

        projdata = self.data.copy()
        p = ProgressBar(len(self.data.dat))
        for i, d in enumerate(self.data.dat):
            projdata.dat[i] = pca.transform(d * self.weights)
            p.progress()
        p.stop()

        # projdataconc = pca.fit_transform(self.weighedconcat)
        # projdata.dat = projdata.deconcatenate(projdataconc)
        return projdata
Author: Acellera, Project: htmd, Lines: 41, Source: gwpca.py

Example 15: PCAIncremental

# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import transform [as alias]
class PCAIncremental(PCAnalyzer):
  """ Incremental PCA -- used to batch input over time/space """
  def __init__(self, components):
    PCAnalyzer.__init__(self)
    if isinstance(components, int):
      self.n_components = components
    self.pca = IncrementalPCA(n_components=components, batch_size=500)
    self.num_seen = 0
    self.type = 'incremental'

  def solve(self, X):
    self.dim = np.prod(X.shape[1:])
    self.pca.partial_fit(X.reshape(len(X), self.dim))
    self.trainsize += len(X)

  def project(self, X):
    if isinstance(X, list):
      X = np.array(X)
    dimX = np.prod(X.shape[1:])
    if dimX != self.dim:
      logging.error('Projection Error in PCA: Cannot reshape/project %s size data using PC Vects of size, %s', str(X.shape), str(self.dim))
      return None
    projection = self.pca.transform(X.reshape(len(X), dimX))
    return projection
Author: DaMSL, Project: ddc, Lines: 26, Source: pca.py


Note: The sklearn.decomposition.IncrementalPCA.transform examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or use should follow each project's License. Do not repost without permission.