

Python FactorAnalysis.fit_transform Method Code Examples

This article collects typical usage examples of the Python method sklearn.decomposition.FactorAnalysis.fit_transform. If you are unsure what FactorAnalysis.fit_transform does, or how and where to use it, the curated code examples below should help. You can also explore other usage examples of the enclosing class, sklearn.decomposition.FactorAnalysis.


Below are 15 code examples of FactorAnalysis.fit_transform, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
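Before the collected examples, here is a minimal, self-contained sketch of the basic call pattern (the array shape and n_components value are illustrative only, not taken from any example below):

import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 6))        # 100 samples, 6 observed features
fa = FactorAnalysis(n_components=2)  # assume 2 latent factors
Z = fa.fit_transform(X)              # fit the model, return the latent factor scores
print(Z.shape)                       # (100, 2)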

Example 1: factor_analyses

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
import os
import numpy as np
from pylab import *  # figure(), xlabel(), scatter(), show(), ... are called unqualified below
from sklearn.decomposition import FactorAnalysis

def factor_analyses(results_dir):
	data_array = np.genfromtxt(os.path.join(results_dir, 'summary.csv'), delimiter=',')
	fa1 = FactorAnalysis(n_components=1)
	new_array_gbm = fa1.fit_transform(np.transpose(data_array[list(range(15))]))
	print(new_array_gbm.shape)
	fa2 = FactorAnalysis(n_components=1)
	new_array_tree = fa2.fit_transform(np.transpose(data_array[list(range(41, 51)) + list(range(54, 64))]))
	print(new_array_tree.shape)

	fa3 = FactorAnalysis(n_components=1)
	new_array_lin = fa3.fit_transform(np.transpose(data_array[list(range(27, 41)) + list(range(51, 54))]))

	fa4 = FactorAnalysis(n_components=1)
	new_array_knn = fa4.fit_transform(np.transpose(data_array[list(range(16, 27))]))

	datasets = [line.rstrip('\n') for line in open(os.path.join(results_dir, 'datasets.csv'), 'r').readlines()]
	methods = [line.rstrip('\n') for line in open(os.path.join(results_dir, 'methods.csv'), 'r').readlines()]
	figure()
	# pretty_scatter is a project-local helper (not shown in this snippet)
	pretty_scatter(new_array_tree, [1 for x in range(115)], data_array[46], 200*np.ones(new_array_tree.shape), ['' for d in datasets])
	xlabel('Dimension 1')
	ylabel('Arbitrary Dimension 2')
	colorbar()

	figure()

	plot(new_array_lin, new_array_tree, 'bo')
	xlabel('Linear')
	ylabel('Tree + RF')

	figure()
	subplot(2,2,1)
	scatter(new_array_gbm, new_array_tree)
	xlabel('GBM')
	ylabel('Tree + RF')

	#figure()
	subplot(2,2,2)
	scatter(new_array_knn, new_array_tree)
	xlabel('KNN')
	ylabel('Tree + RF')

	#figure()
	subplot(2,2,3)
	scatter(new_array_knn, new_array_lin)
	xlabel('KNN')
	ylabel('Linear')

	subplot(2,2,4)
	scatter(new_array_gbm, new_array_lin)
	xlabel('GBM')
	ylabel('Linear')
	show()
Author: jamesrobertlloyd, Project: dataset-space, Lines: 54, Source: data_analysis.py

Example 2: reduceDataset

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
 def reduceDataset(self,nr=3,method='PCA'):
     '''Reduces the dimensionality of the given dataset using techniques provided by the sklearn library.
      Methods available:
                         'PCA'
                         'FactorAnalysis'
                         'KPCArbf','KPCApoly'
                         'KPCAcosine','KPCAsigmoid'
                         'IPCA'
                         'FastICADeflation'
                         'FastICAParallel'
                         'Isomap'
                         'LLE'
                         'LLEmodified'
                         'LLEltsa'
     '''
     dataset=self.ModelInputs['Dataset']
     #dataset=self.dataset[Model.in_columns]
     #dataset=self.dataset[['Humidity','TemperatureF','Sea Level PressureIn','PrecipitationIn','Dew PointF','Value']]
     #PCA
     if method=='PCA':
         sklearn_pca = sklearnPCA(n_components=nr)
         reduced = sklearn_pca.fit_transform(dataset)
     #Factor Analysis
     elif method=='FactorAnalysis':
         fa=FactorAnalysis(n_components=nr)
         reduced=fa.fit_transform(dataset)
     #kernel pca with rbf kernel
     elif method=='KPCArbf':
         kpca=KernelPCA(nr,kernel='rbf')
         reduced=kpca.fit_transform(dataset)
     #kernel pca with poly kernel
     elif method=='KPCApoly':
         kpca=KernelPCA(nr,kernel='poly')
         reduced=kpca.fit_transform(dataset)
     #kernel pca with cosine kernel
     elif method=='KPCAcosine':
         kpca=KernelPCA(nr,kernel='cosine')
         reduced=kpca.fit_transform(dataset)
     #kernel pca with sigmoid kernel
     elif method=='KPCAsigmoid':
         kpca=KernelPCA(nr,kernel='sigmoid')
         reduced=kpca.fit_transform(dataset)
     #Incremental PCA
     elif method=='IPCA':
         ipca=IncrementalPCA(nr)
         reduced=ipca.fit_transform(dataset)
     #Fast ICA
     elif method=='FastICAParallel':
         fip=FastICA(nr,algorithm='parallel')
         reduced=fip.fit_transform(dataset)
     elif method=='FastICADeflation':
         fid=FastICA(nr,algorithm='deflation')
         reduced=fid.fit_transform(dataset)
     elif method == 'All':
         self.dimensionalityReduction(nr=nr)
         return self
     else:
         raise ValueError('Unknown reduction method: %s' % method)
     
     self.ModelInputs.update({method:reduced})
     self.datasetsAvailable.append(method)
     return self
Author: UIUC-SULLIVAN, Project: ThesisProject_Andrea_Mattera, Lines: 62, Source: Classes.py
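A hedged usage sketch for reduceDataset, assuming `model` is an instance of the (unnamed) class in Classes.py and ModelInputs['Dataset'] already holds a 2-D numeric array:

# Hypothetical instance; the class definition is not part of this snippet.
model.reduceDataset(nr=3, method='FactorAnalysis')
reduced = model.ModelInputs['FactorAnalysis']  # latent scores, shape (n_samples, 3)
print(model.datasetsAvailable)                 # now includes 'FactorAnalysis'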

Example 3: factor_analysis

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
def factor_analysis(results_dir):
	data_array = np.transpose(np.genfromtxt(os.path.join(results_dir,'summary.csv'),delimiter=','))
	fa = FactorAnalysis(n_components = 2)
	new_array = fa.fit_transform(data_array)
	print(fa.get_covariance().shape)
	print(new_array)
	np.savetxt(os.path.join(results_dir,'FA-datasets-2.csv'), new_array, delimiter=',')
Author: jamesrobertlloyd, Project: dataset-space, Lines: 9, Source: data_analysis.py

Example 4: fit_factor_analysis

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
def fit_factor_analysis(percentage=0.8):
    """
    Runs the factor analysis.

    Parameters:

        percentage: float, default:0.8

        The percentage of the cumulative sum of the eigenvalues to be retained. This number determines the number of loading factors in the analysis.

    Returns:
        
        X: array of floats [n_samples,n_factors]

            The transformed data after the factor analysis.

        components: array of floats [n_factors,n_features]

            The components (loading matrix) of the factor analysis.
    """
    fa = FactorAnalysis()
    fa.fit(data)  # `data` is a module-level array in the original script
    C = fa.get_covariance()
    l, e = np.linalg.eigh(C)             # eigenvalues in ascending order
    cs = np.cumsum(l[::-1]) / np.sum(l)  # cumulative variance share, largest eigenvalues first
    n = np.sum(cs < percentage)          # number of factors needed to reach the threshold

    fa.n_components = n
    X_ = fa.fit_transform(data)
    components = fa.components_
    return X_,components
Author: pedropazzini, Project: factor_analysis, Lines: 33, Source: factor_analysis_script.py
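A hedged usage sketch (in the original script `data` is a module-level array loaded elsewhere; the random matrix below is an illustrative stand-in):

import numpy as np

data = np.random.RandomState(0).normal(size=(200, 10))  # stand-in for the script's global
X_, components = fit_factor_analysis(percentage=0.8)
print(X_.shape)          # (200, n_factors)
print(components.shape)  # (n_factors, 10)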

Example 5: dimensionalityReduction

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
 def dimensionalityReduction(self,nr=5):
     '''It applies all the dimensionality reduction techniques available in this class:
     Techniques available:
                         'PCA'
                         'FactorAnalysis'
                         'KPCArbf','KPCApoly'
                         'KPCAcosine','KPCAsigmoid'
                         'IPCA'
                         'FastICADeflation'
                         'FastICAParallel'
                         'Isomap'
                         'LLE'
                         'LLEmodified'
                         'LLEltsa'
     '''
     dataset=self.ModelInputs['Dataset']
     sklearn_pca = sklearnPCA(n_components=nr)
     p_components = sklearn_pca.fit_transform(dataset)
     fa=FactorAnalysis(n_components=nr)
     factors=fa.fit_transform(dataset)
     kpca=KernelPCA(nr,kernel='rbf')
     rbf=kpca.fit_transform(dataset)
     kpca=KernelPCA(nr,kernel='poly')
     poly=kpca.fit_transform(dataset)
     kpca=KernelPCA(nr,kernel='cosine')
     cosine=kpca.fit_transform(dataset)
     kpca=KernelPCA(nr,kernel='sigmoid')
     sigmoid=kpca.fit_transform(dataset)
     ipca=IncrementalPCA(nr)
     i_components=ipca.fit_transform(dataset)
     fip=FastICA(nr,algorithm='parallel')
     fid=FastICA(nr,algorithm='deflation')
     ficaP=fip.fit_transform(dataset)   # parallel FastICA scores
     ficaD=fid.fit_transform(dataset)   # deflation FastICA scores, matching the key order below
     '''isomap=Isomap(n_components=nr).fit_transform(dataset)
     try:
         lle1=LocallyLinearEmbedding(n_components=nr).fit_transform(dataset)
     except ValueError:
         lle1=LocallyLinearEmbedding(n_components=nr,eigen_solver='dense').fit_transform(dataset)
     try:
         
         lle2=LocallyLinearEmbedding(n_components=nr,method='modified').fit_transform(dataset)
     except ValueError:
         lle2=LocallyLinearEmbedding(n_components=nr,method='modified',eigen_solver='dense').fit_transform(dataset) 
     try:
         lle3=LocallyLinearEmbedding(n_components=nr,method='ltsa').fit_transform(dataset)
     except ValueError:
         lle3=LocallyLinearEmbedding(n_components=nr,method='ltsa',eigen_solver='dense').fit_transform(dataset)'''
     values=[p_components,factors,rbf,poly,cosine,sigmoid,i_components,ficaD,ficaP]#,isomap,lle1,lle2,lle3]
     keys=['PCA','FactorAnalysis','KPCArbf','KPCApoly','KPCAcosine','KPCAsigmoid','IPCA','FastICADeflation','FastICAParallel']#,'Isomap','LLE','LLEmodified','LLEltsa']
     self.ModelInputs.update(dict(zip(keys, values)))
     self.datasetsAvailable.extend(keys)
     
     #debug
     #dataset=pd.DataFrame(self.ModelInputs['Dataset'])
     #dataset['Output']=self.ModelOutput
     #self.debug['Dimensionalityreduction']=dataset
     ###
     return self
Author: UIUC-SULLIVAN, Project: ThesisProject_Andrea_Mattera, Lines: 61, Source: Classes.py

Example 6: factor_analysis

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
def factor_analysis(data):
    fa = FactorAnalysis()
    features = numerical_features + categorical_features  # module-level feature lists
    fa_data = fa.fit_transform(data[features])
    plt.figure()
    plt.subplot(2, 2, 1)  # subplot indices are 1-based
    plt.scatter(fa_data[:, 0], fa_data[:, 1], c=data[target])
    plt.subplot(2, 2, 2)
    plt.scatter(fa_data[:, 2], fa_data[:, 3], c=data[target])
    plt.subplot(2, 2, 3)
    plt.scatter(fa_data[:, 4], fa_data[:, 5], c=data[target])
    plt.subplot(2, 2, 4)
    plt.scatter(fa_data[:, 6], fa_data[:, 7], c=data[target])
    return fa_data
Author: Benjamin-Knoepfle, Project: toolbox, Lines: 16, Source: feature_exploration.py

Example 7: testAlgorithm

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
def testAlgorithm():
    # np, random, FactorAnalysis, block_ZIFA and the simulated-data generator are
    # imported at module level in the original example.py; only pyplot is imported here.
    import matplotlib.pyplot as plt

    random.seed(35)
    np.random.seed(32)

    n = 200
    d = 20
    k = 2
    sigma = .3
    n_clusters = 3
    decay_coef = .1

    X, Y, Z, ids = generateSimulatedDimensionalityReductionData(n_clusters, n, d, k, sigma, decay_coef)

    Zhat, params = block_ZIFA.fitModel(Y, k)
    colors = ['red', 'blue', 'green']
    cluster_ids = sorted(list(set(ids)))
    model = FactorAnalysis(n_components=k)
    factor_analysis_Zhat = model.fit_transform(Y)

    plt.figure(figsize=[15, 5])

    plt.subplot(131)
    for id in cluster_ids:
        plt.scatter(Z[ids == id, 0], Z[ids == id, 1], color=colors[id - 1], s=4)
        plt.title('True Latent Positions\nFraction of Zeros %2.3f' % (Y == 0).mean())
        plt.xlim([-4, 4])
        plt.ylim([-4, 4])

    plt.subplot(132)
    for id in cluster_ids:
        plt.scatter(Zhat[ids == id, 0], Zhat[ids == id, 1], color=colors[id - 1], s=4)
        plt.xlim([-4, 4])
        plt.ylim([-4, 4])
        plt.title('ZIFA Estimated Latent Positions')
        # title(titles[method])

    plt.subplot(133)
    for id in cluster_ids:
        plt.scatter(factor_analysis_Zhat[ids == id, 0], factor_analysis_Zhat[ids == id, 1], color = colors[id - 1], s = 4)
        plt.xlim([-4, 4])
        plt.ylim([-4, 4])
        plt.title('Factor Analysis Estimated Latent Positions')

    plt.show()
Author: epierson9, Project: ZIFA, Lines: 48, Source: example.py

Example 8: initialize

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
def initialize(trials, params, config):
    """Make skeleton"""
    # TODO: fast initialization for large dataset
    from sklearn.decomposition import FactorAnalysis

    zdim = params["zdim"]
    xdim = params["xdim"]

    # TODO: use only a subsample of trials?
    y = np.concatenate([trial["y"] for trial in trials], axis=0)
    subsample = np.random.choice(y.shape[0], max(y.shape[0] // 10, 50))
    ydim = y.shape[-1]
    fa = FactorAnalysis(n_components=zdim, random_state=0)
    z = fa.fit_transform(y[subsample, :])
    a = fa.components_
    b = np.log(np.maximum(np.mean(y, axis=0, keepdims=True), config["eps"]))
    noise = np.var(y[subsample, :] - z @ a, ddof=0, axis=0)

    # stupid way of update
    # two cases
    # 1) no key
    # 2) empty value (None)
    if params.get("a") is None:
        params.update(a=a)
    if params.get("b") is None:
        params.update(b=b)
    if params.get("noise") is None:
        params.update(noise=noise)

    for trial in trials:
        length = trial["y"].shape[0]

        if trial.get("mu") is None:
            trial.update(mu=fa.transform(trial["y"]))

        if trial.get("x") is None:
            trial.update(x=np.ones((length, xdim, ydim)))

        trial.update({"w": np.zeros((length, zdim)), "v": np.zeros((length, zdim))})
Author: catniplab, Project: vLGP, Lines: 41, Source: preprocess.py

Example 9: PCA

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from sklearn.decomposition import PCA, FactorAnalysis, FastICA

# X = np.dot(S, A.T)  # Generate observations

rng = np.random.RandomState(42)
S = rng.normal(scale=0.01, size=(10000, 2))
S[:,1][::2] *= 1.7
S[:,0][::2] /= 1.7
S[:,1][1::2] /= 1.7
S[:,0][1::2] *= 1.7
X=deepcopy(S)
X[:,1] = X[:,0]/-2+X[:,1]

pca = PCA()
S_pca_ = pca.fit_transform(X)

fa = FactorAnalysis(svd_method="lapack")
S_fa_ = fa.fit_transform(X)

ica = FastICA(max_iter=20000, tol=0.00001)
S_ica_ = ica.fit_transform(X)  # Estimate the sources


###############################################################################
# Plot results

def plot_samples(S, axis_list=None):
    plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
                color='steelblue', alpha=0.5)
    if axis_list is not None:
        colors = ['orange', 'red']
        for color, axis in zip(colors, axis_list):
            axis /= axis.std()
Author: TheChymera, Project: FANS, Lines: 33, Source: decomposition_test.py

Example 10: PCA

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA, FastICA, FactorAnalysis

rng = np.random.RandomState(42)
s = rng.normal(scale=0.01,size=(4,1000))
S = np.ones((3,1000))
S[0] = s[0]
S[1] = s[1]
S[2] = s[0]+s[1]

pca = PCA()
S_pca_ = pca.fit_transform(S.T)

fa = FactorAnalysis(svd_method="lapack")
S_fa_ = fa.fit_transform(S.T)

ica = FastICA(max_iter=20000, tol=0.00001)
S_ica_ = ica.fit_transform(S.T)  # Estimate the sources

def plot_3d(data, ax, axis_list=None):
	data /= np.std(data)
	ax.scatter(data[0] ,data[1], data[2] , s=2, marker='o', zorder=10, color='steelblue', alpha=0.5)
	ax.set_xlim(-4, 4)
	ax.set_ylim(-4, 4)
	ax.set_zlim(-4, 4)
	ax.set_xlabel('x')
	ax.set_ylabel('y')
	ax.set_zlabel('z')
	for label in (ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()):
		label.set_fontsize(6)
Author: TheChymera, Project: FANS, Lines: 33, Source: 3d_decomposition.py

Example 11: base

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
def base(
    use_filter="default",
    data_path="~/data/faons/latest.csv",
    filter_name="default.csv",
    participant_subset="",
    drop_metadata=True,
    drop=[],
    clean=7,
    components=5,
    facecolor="#ffffff",
):

    data_path = path.expanduser(data_path)
    filter_path = path.join(path.dirname(path.realpath(__file__)), "filters", filter_name)

    filters = pd.read_csv(
        filter_path, index_col=0, header=None
    ).transpose()  # transpose filters because of .csv file formatting, specify index_col to not get numbered index
    all_data = pd.read_csv(data_path)

    all_data = all_data[[len(set(row)) > clean for row in np.array(all_data)]]

    # drops metadata
    if drop_metadata:
        all_data = all_data.drop(filters["metadata"][pd.Series.notnull(filters["metadata"])], axis=1)

        # compile list of column names to be dropped:
    drop_list = []
    for drop_item in drop:
        drop_list += list(filters[drop_item][pd.Series.notnull(filters[drop_item])])
    drop_list = list(
        set(drop_list)
    )  # get unique column names (the list may contain duplicates if overlaying multiple filters)
    all_data = all_data.drop(drop_list, axis=1)

    if participant_subset == "odd":
        keep_rows = all_data.index.values[1::2]
        filtered_data = all_data.loc[keep_rows]
    elif participant_subset == "even":
        keep_rows = all_data.index.values[0::2]
        filtered_data = all_data.loc[keep_rows]
    elif participant_subset == "male":
        filtered_data = all_data[all_data["My legal gender:"] == "Male"]
    elif participant_subset == "female":
        filtered_data = all_data[all_data["My legal gender:"] == "Female"]
    else:
        filtered_data = all_data

        # convert to correct type for analysis:
    filtered_data_array = np.array(filtered_data, dtype="float64")

    filtered_data_array = filtered_data_array / 100

    pca = PCA()
    S_pca_ = pca.fit_transform(filtered_data_array)

    fa = FactorAnalysis(svd_method="lapack")
    S_fa_ = fa.fit_transform(filtered_data_array)

    ica = FastICA(n_components=components, max_iter=20000, tol=0.00001)
    S_ica_ = ica.fit_transform(filtered_data_array)  # Estimate the sources

    load = ica.mixing_

    # remappedColorMap is a project-local helper (not shown in this snippet)
    remapped_cmap = remappedColorMap(
        cm.PiYG,
        start=(np.max(load) - abs(np.min(load))) / (2 * np.max(load)),
        midpoint=abs(np.min(load)) / (np.max(load) + abs(np.min(load))),
        name="shrunk",
    )
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(17.5, 5), facecolor=facecolor)
    graphic = ax.imshow(load, cmap=remapped_cmap, interpolation="none")
Author: TheChymera, Project: FANS, Lines: 74, Source: icans.py

Example 12: compute_FA

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
def compute_FA(df):
    FA = FactorAnalysis()
    return FA.fit_transform(df)
Author: real-limoges, Project: match-terpiece, Lines: 5, Source: feature_reduction.py
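A usage sketch for compute_FA (the DataFrame is an illustrative stand-in; with no n_components argument, FactorAnalysis keeps as many components as there are features):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.RandomState(0).normal(size=(50, 4)), columns=list('abcd'))
scores = compute_FA(df)
print(scores.shape)  # (50, 4)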

Example 13: range

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
    preds.append([])
    certainty.append([])
    # each network has a vote in that cross validation fold
    for s in range(len(seeds)):
        X = np.vstack([np.array(g1_fmri[s]), np.array(g2_fmri[s])])
        y = np.array(labels)
        X = preprocessing.scale(X)

        print('seed %d: cv %d/%d' % (s+1, oidx+1, nobs))
        X_train = X[train]
        X_test = X[test]
        y_train = y[train]
        y_test = y[test]
        c_val_scores = []
        dimred = FactorAnalysis(n_components=20)
        X_train = dimred.fit_transform(X_train)
        X_test = dimred.transform(X_test)
        for c in cs:
            inner_preds = []
            clf = LogisticRegression(C=c, penalty="l1", dual=False, class_weight='auto')
            for iidx, (itrain, itest) in enumerate(inner_cv):
                X_inner_train = X_train[itrain]
                X_val = X_train[itest]
                y_inner_train = y_train[itrain]
                y_val = y_train[itest]
                scaler = preprocessing.StandardScaler().fit(X_inner_train)
                X_inner_train = scaler.transform(X_inner_train)
                X_val = scaler.transform(X_val)
                clf.fit(X_inner_train, y_inner_train)
                inner_preds.append(clf.predict(X_val))
            c_val_scores.append(f1_score(y_train, inner_preds, pos_label=1))
Author: gsudre, Project: research_code, Lines: 33, Source: classify_rest_meg.py

Example 14: FactorAnalysis

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
# For example, keep at least 98 percent of the variance
pca = decomposition.PCA(n_components=.98)
iris_X_prime = pca.fit_transform(iris_X)
pca.explained_variance_ratio_.sum()
# 1.0


# Using factor analysis for decomposition

#Factor analysis is another technique we can use to reduce dimensionality. However, factor
#analysis makes assumptions and PCA does not. The basic assumption is that there are
#implicit features responsible for the features of the dataset.

from sklearn.decomposition import FactorAnalysis
fa = FactorAnalysis(n_components=2)
iris_two_dim = fa.fit_transform(iris.data)
iris_two_dim[:5]
# array([[-1.33125848,  0.55846779],
#        [-1.33914102, -0.00509715],
#        [-1.40258715, -0.307983  ],
#        [-1.29839497, -0.71854288],
#        [-1.33587575,  0.36533259]])


#Kernel PCA for nonlinear dimensionality reduction

# Generate nonlinear data
import numpy as np
A1_mean = [1, 1]
A1_cov = [[2, .99], [1, 1]]
A1 = np.random.multivariate_normal(A1_mean, A1_cov, 50)
Author: chenzhongtao, Project: source, Lines: 33, Source: premodel.py
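The snippet above is cut off by the source. A hedged sketch of how such a kernel-PCA example typically continues (the second blob's parameters and the cosine kernel are assumptions for illustration, not recovered from the original):

A2_mean = [5, 5]
A2_cov = [[2, .99], [1, 1]]
A2 = np.random.multivariate_normal(A2_mean, A2_cov, 50)
A = np.vstack((A1, A2))  # one nonlinearly shaped class built from two blobs

from sklearn.decomposition import KernelPCA
kpca = KernelPCA(kernel='cosine', n_components=1)
A_transformed = kpca.fit_transform(A)  # 1-D embedding via the cosine kernel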

Example 15: int

# Required import: from sklearn.decomposition import FactorAnalysis [as alias]
# Or: from sklearn.decomposition.FactorAnalysis import fit_transform [as alias]
import sys
from sklearn.decomposition import FactorAnalysis
from sklearn.datasets import load_svmlight_file, dump_svmlight_file

if __name__ == "__main__":
    svm_file = sys.argv[1]
    dim = int(sys.argv[2])
    fa = FactorAnalysis(
        n_components=dim, 
        tol=0.01, 
        copy=False,
        max_iter=1000, 
        verbose=3, 
        noise_variance_init=None,
    )

    X, y = load_svmlight_file(svm_file, zero_based=False, query_id=False)
    X_new = fa.fit_transform(X.toarray(), y)

    dump_svmlight_file(X_new, y, "%s.fa%d" % (svm_file, dim), zero_based=False)



Author: fengqi0423, Project: hahaha, Lines: 22, Source: factor_analysis.py
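An invocation sketch (assuming the script is saved as factor_analysis.py, per the source line above): running python factor_analysis.py train.svm 10 reduces train.svm to 10 factors and writes the result to train.svm.fa10 in svmlight format.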


Note: The sklearn.decomposition.FactorAnalysis.fit_transform method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.